diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 295b6ef8922..2c22b3f7191 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -243,6 +243,7 @@ jobs:
           'vertexai',
           'scorers_tests',
           'pandas-test',
+          'huggingface',
         ]
       fail-fast: false
     services:
diff --git a/noxfile.py b/noxfile.py
index dff1305f21f..69c9c83e6c8 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -50,6 +50,7 @@ def lint(session):
         "vertexai",
         "scorers_tests",
         "pandas-test",
+        "huggingface",
     ],
 )
 def tests(session, shard):
diff --git a/pyproject.toml b/pyproject.toml
index 99c534303b6..6191f153977 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -80,6 +80,7 @@ notdiamond = ["notdiamond>=0.3.21", "litellm<=1.49.1"]
 openai = ["openai>=1.0.0"]
 pandas-test = ["pandas>=2.2.3"]
 modal = ["modal", "python-dotenv"]
+huggingface = ["huggingface-hub>=0.26.2"]
 vertexai = ["vertexai>=1.70.0"]
 test = [
     "nox",
diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_chat_completion.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_chat_completion.yaml
new file mode 100644
index 00000000000..fcde0c224d3
--- /dev/null
+++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_chat_completion.yaml
@@ -0,0 +1,53 @@
+interactions:
+- request:
+    body: '{"model": "meta-llama/Llama-3.2-11B-Vision-Instruct", "messages": [{"role":
+      "user", "content": [{"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
+      {"type": "text", "text": "Describe this image in one sentence."}]}], "max_tokens":
+      500, "seed": 42, "stream": false}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate, br, zstd
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '351'
+      Content-Type:
+      - application/json
+      X-Amzn-Trace-Id:
+      - 2f329b59-35c3-4276-9f7a-83609a5dc417
+      user-agent:
+      - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1
+    method: POST
+    uri: https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions
+  response:
+    body:
+      string: '{"object":"chat.completion","id":"","created":1730223736,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"message":{"role":"assistant","content":"The
+        image shows the Statue of Liberty, an iconic monument symbolizing freedom
+        and democracy in the city of New York, USA."},"logprobs":null,"finish_reason":"stop"}],"usage":{"prompt_tokens":44,"completion_tokens":26,"total_tokens":70}}'
+    headers:
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '452'
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 29 Oct 2024 17:43:37 GMT
+      access-control-allow-credentials:
+      - 'true'
+      vary:
+      - Origin, Access-Control-Request-Method, Access-Control-Request-Headers
+      x-compute-time:
+      - '2.186847565'
+      x-compute-type:
+      - cache
+      x-request-id:
+      - qIjxpXjuA51Xjz5ochgWW
+      x-sha:
+      - cee5b78e6faed15d5f2e6d8a654fd5b247c0d5ca
+    status:
+      code: 200
+      message: OK
+version: 1
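The cassette above records a synchronous chat completion. A minimal sketch of the kind of call that produces this recording, using huggingface_hub's InferenceClient directly (this is not code from the PR, and it assumes an HF token is available in the environment):

from huggingface_hub import InferenceClient

client = InferenceClient()  # picks up the HF token from the environment

response = client.chat_completion(
    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                    },
                },
                {"type": "text", "text": "Describe this image in one sentence."},
            ],
        }
    ],
    max_tokens=500,
    seed=42,
    stream=False,  # matches "stream": false in the recorded request body
)
print(response.choices[0].message.content)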
diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_chat_completion_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_chat_completion_async.yaml
new file mode 100644
index 00000000000..0cda40cafc8
--- /dev/null
+++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_chat_completion_async.yaml
@@ -0,0 +1,38 @@
+interactions:
+- request:
+    body: null
+    headers:
+      user-agent:
+      - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1
+    method: POST
+    uri: https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions
+  response:
+    body:
+      string: '{"object":"chat.completion","id":"","created":1730223736,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"message":{"role":"assistant","content":"The
+        image shows the Statue of Liberty, an iconic monument symbolizing freedom
+        and democracy in the city of New York, USA."},"logprobs":null,"finish_reason":"stop"}],"usage":{"prompt_tokens":44,"completion_tokens":26,"total_tokens":70}}'
+    headers:
+      Access-Control-Allow-Credentials:
+      - 'true'
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '452'
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 29 Oct 2024 17:51:12 GMT
+      Vary:
+      - Origin, Access-Control-Request-Method, Access-Control-Request-Headers
+      x-compute-time:
+      - '2.186847565'
+      x-compute-type:
+      - cache
+      x-request-id:
+      - 6hX8mv6YF99wfUe2DciQt
+      x-sha:
+      - cee5b78e6faed15d5f2e6d8a654fd5b247c0d5ca
+    status:
+      code: 200
+      message: OK
+version: 1
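The async cassette above and the streaming cassette below differ from the synchronous recording only in the client entry point and the stream flag. A sketch of both calls, with the same hedges as above (plain huggingface_hub, not code from the PR):

import asyncio

from huggingface_hub import AsyncInferenceClient, InferenceClient

MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
MESSAGES = [
    {
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {
                    "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                },
            },
            {"type": "text", "text": "Describe this image in one sentence."},
        ],
    }
]


async def async_completion() -> None:
    # Async variant: AsyncInferenceClient mirrors InferenceClient's API.
    client = AsyncInferenceClient()
    response = await client.chat_completion(
        model=MODEL, messages=MESSAGES, max_tokens=500, seed=42, stream=False
    )
    print(response.choices[0].message.content)


def stream_completion() -> None:
    # Streaming variant: stream=True yields chat.completion.chunk deltas,
    # matching the SSE events recorded in the stream cassette below.
    client = InferenceClient()
    for chunk in client.chat_completion(
        model=MODEL, messages=MESSAGES, max_tokens=500, seed=42, stream=True
    ):
        print(chunk.choices[0].delta.content or "", end="")
    print()


asyncio.run(async_completion())
stream_completion()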
{"object":"chat.completion.chunk","id":"","created":1730224061,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + the"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224061,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + iconic"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224062,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + Statue"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224062,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + of"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224062,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + Liberty"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224062,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + in"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224062,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + New"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224063,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + York"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + City"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":","},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + set"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + against"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: 
{"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + the"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + vibrant"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + skyline"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + of"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":" + Manhattan"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":"."},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"object":"chat.completion.chunk","id":"","created":1730224064,"model":"meta-llama/Llama-3.2-11B-Vision-Instruct","system_fingerprint":"2.3.1-dev0-sha-de90261","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":"stop"}],"usage":null} + + + data: [DONE] + + + ' + headers: + Connection: + - keep-alive + Content-Length: + - '6376' + Content-Type: + - text/event-stream + Date: + - Tue, 29 Oct 2024 17:48:20 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-type: + - cache + x-request-id: + - j58E8aC66Ja4p8kkYMwz5 + x-sha: + - cee5b78e6faed15d5f2e6d8a654fd5b247c0d5ca + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_document_question_answering.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_document_question_answering.yaml new file mode 100644 index 00000000000..c29a4cecda4 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_document_question_answering.yaml @@ -0,0 +1,540 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + User-Agent: + - python-requests/2.32.3 + X-Amzn-Trace-Id: + - dab98c1a-2513-47bc-b495-f14b7d0073c0 + method: GET + uri: https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png + response: + body: + string: !!binary | + iVBORw0KGgoAAAANSUhEUgAAAu4AAAQlCAMAAADePLi1AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ + bWFnZVJlYWR5ccllPAAAA+5pVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdp + 
bj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6 + eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTQ1IDc5LjE2 + MzQ5OSwgMjAxOC8wOC8xMy0xNjo0MDoyMiAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJo + dHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlw + dGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEu + MC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVz + b3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5z + OmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1wTU06RG9jdW1lbnRJRD0i + eG1wLmRpZDoxM0YzN0MzOUJGRjExMUVBODJDREU2NTMzRjY0MTBGMSIgeG1wTU06SW5zdGFuY2VJ + RD0ieG1wLmlpZDoxM0YzN0MzOEJGRjExMUVBODJDREU2NTMzRjY0MTBGMSIgeG1wOkNyZWF0b3JU + b29sPSJJbnZvaWNlSG9tZS5jb20iPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJ + RD0idXVpZDo4MGU0MmU2OC1lZjRmLTExZjUtMDAwMC0zNzM0YTAxMTJjNWEiIHN0UmVmOmRvY3Vt + ZW50SUQ9InV1aWQ6ODBlNDJlNjgtZWY0Zi0xMWY1LTAwMDAtMzczNGEwMTEyYzVhIi8+IDxkYzpj + cmVhdG9yPiA8cmRmOlNlcT4gPHJkZjpsaT5JbnZvaWNlSG9tZS5jb208L3JkZjpsaT4gPC9yZGY6 + U2VxPiA8L2RjOmNyZWF0b3I+IDxkYzp0aXRsZT4gPHJkZjpBbHQ+IDxyZGY6bGkgeG1sOmxhbmc9 + IngtZGVmYXVsdCI+VW50aXRsZWQ8L3JkZjpsaT4gPC9yZGY6QWx0PiA8L2RjOnRpdGxlPiA8L3Jk + ZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/ + PkiJ1HYAAAAwUExURebk5qKjp9FRRlFVaQAAACAtWGdoe+ShkLa1vTw/Stl1Y+/Kvnl5j42LlczM + zP///yeDOCAAAFr7SURBVHja7J2JgqMoEIZhOAZ6Ad//bbcO8Irpa9Idk/z/7nQSowb1oywQqswE + QS8jg1MAAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIeAOQcAdgoA7BAF3CALuEATc + IQi4QxBwhyDgDkHAHYKAOwTcIQi4QxBwhyDgDkHAHYKAOwQBdwgC7hAE3CEIuEMQcIeAOwQBdwgC + 7hAE3CEIuEMQcIcg4A5BwB2CgDsEAXcIAu4QcIcg4A5BwL3LdukO0vho01ihL2r0tu2/m1YLlxK0 + Gp3L2cWyWdHYizVpYV/XbdddyrERrjP0T7ib0KUoufExuLFG1s+F3pbx5XoPsS9rA/ZlHyHkFaEp + 7NZ8Z90YDoV72NkxNKZ10dsHwn2G0i2420tg5+9z/1h3hDpzHffr6x7jnoH7mVFvaXdTTqmZR8Hd + XuLexnfpEvd+NyjXGb3A/Z11gfvDoW5tvfQ+adGPIH973OMl7qY7NqFe7iAe2uu1W7TH3b6zLnB/ + KNivNLVmK9/Oj3u+xH3+ulx65FID2iGl5RD3ue5sVIH7o6l9APvc03Fq3IfHssY97iz/ykbbD1uY + O9zLMdHA/cFgt59UOznu9RL3su+2WRa19c5ojRjzbk873N16XbdrMqBn5jncmJ8C/gdwd5e420vu + 4mqJ3ZrhuN3TFve2vYvYbZNhbOqotT9r298PPZBp7z68OTHunek17gdd5+uOmbL72m3o325sdz1A + dVM1Drwm6HSm3X5Z7cS4pwvczb6Xcn4KFY8g3fbTb3Hf+0VmU8mA+9OZ9oMH8ufCvVzgPn8/90S2 + cNBxU3dfasXZ4n4BtHv3W+hsSvZbuo1D8xO4u+u4l30/pF1/aw93fox72TcCEnB/TkfmaMjUmXBX + v2KD+0XXjF1DCtxB++/w/iO42wvc6x73clAzgDto/2HefwT3eIH7RU9kXD8dAu6g/Xd4vy3uzi02 + fIP7xSAxd+DmA/cnl7X35v22uJe4gLnBfR7oYg/6If8NdwfcH6VPpv4z7ulcuNulR3GD+76vsW0+ + /hPuNhYROiJPrmZvoHQq3NtC2xb3HYfp0PP/Fu4bAffnddxv8Xz1trjH7qTkC9x3PZH1cNQXcIfj + /rPu+61xj7MjvcXdbgfqls3IXOD+Ao77rXBPZ8K9zj65O56jYdZYbgdPAne4Mj/sztwa9zZzvMXd + HI4fiz+Fe8hDq3nb0BMY938Lo3Jr3Me7Pe5bpE047La5Ie6Y2/F8vTI3MO83x3123q/gXtb9kBa4 + o536m63Vm+NuB4073Mvaq7aH01qBO4z7D5v3m+M+O+873DeDxOqWROAOz/13vPeb4z7eth3um57I + chijA7ijW+aHzfvtce8o27jFsq2h3fZDAncY91/qe7897p3OuMN90xPpDgOO3RB3V+Is4P5cDdV/ + aazeHvc++NG5KyMX67Jx/THc8ZjpeRuq/+LN3B733VSPi3Hp5aIfErjDl/klb+YHcK/HuK96Iu1x + bOzv4T4CgyPwxvl9mXoU7Ld+Z4k5De7pGHe79ETu+iExm+lV+mXSZRjUdLEsHcV7T7uw2O00uE/5 + EPfVILGym6cN3J/ada9rRg998HZhttt+HbNpAdQT4R4PcTcLte5KZCTg/tyue9p1wS8e+AiRmi42 + S0d3iW877z+Bez3EfTWp9FqnDXB/LdyNNr3MzHKa9kvs5ZJ/ebD6E7i393G30z6CHnB/jZbqDneJ + dc7N0DZYNuLG0BIz/JnLJf/UVv0J3DddkQvuo2ummn2mJuD+Ii3V/afx0ehNoA1Pv9r+3eU6J8S9 + HOI+fJyS9k/3gfsr4t5mfmtS023m76kpKu785Tr/1jXzI7jbQ9wHtrHuU3kA92fumLmGu9lmiDbi + j7e67143R+9Phbs5xL3NOWau4V73O09fwB1xZh4Y98bv08pdqfJkaWPRT4v72nlfPezPIbzfiI27 + +8Bn0hnMG+Op6uPh3jY98jPuI8nQFvdtr/2ZcC+HuLvD3JCr1UeGvLIZu7vFvV7Jspon4P7YuKeB + 
++jCSPUC93RK3NMh7rvB6OmiEdu3T++lItt9OQefjMAduN8J91Wq3xXuu4yo7cKrJ4bJY4u7bXd5 + zFa7rrXs7hWXgTcQe+PxnBlDOsT9pL77FI9wt1dz+7rjdKjtCPf43rrIq/rAPTNp6ZkR0JN0vz9A + U3U1juAgL/xFSuHdN+E4fGonur23LrJmPw7u7aKTcQV0t/RrxKfz4t6OcDdHgL6DaTvE/ThLvAHu + J9X7j5nq7jHT1Af6dtwv1zkl7ot7sp515I77Ia+4M3Y6xv1o3TQB90fDXQcIVB4y0AcIrIhuy5LV + OuccM7Pmrlyz4dsxbcZdo/0g5fb1dYH7+fT+ELHUU6bOA8LMnEQ1zcPI1uucCHd32Sy9ivt+xPJ2 + zp9r03Xcr6/r0FQ9ndLWnnelgawxq3vAaklfdLnOXca7j7AWiprGuXD14nu3NuLJxXfiYZjq5rAZ + a8+sHWxxbd26/oVZBcydAve2b8GaC4/HXDRxzYFX9Ou4/1QzPqXUzO3Xhe54TetV3K1ev/XEvHlJ + vVhyx8l7EPSNtuqi9aL9ROyPl/x2JAII+npb9e5hxIA79HvO+8OGVYKgb3gzjxo0D4Lu6M0Y4A6d + tm/m4QNeQ9D9vJkG3KHXaaxOwB16GW+mAXfodcz7BNyhl/HeG3CHXsa8pwm4Qy9j3htwh16mtZom + 4A6dXvdvpwJ36NHcGQPcoZdxZ9oE3KHH6J2p/wp7TRNwh16G9wm4Qy/DuwHu0CPxfl/agTv0IPa9 + 3oB24A49iH1Pt0AVuEO/q3afPhngDj0O720C7tBDyqR7uO3AHXoMA99uV9O+2+Iolf4r5nh/jb+9 + HsevLqFTJ1tQ42Dgf7yN+o+4Zy/aNyCyhryO+m29srH36/cNVx/A/wrs/4J7aZcd/8lretPiXUtl + TfVG64fBAbgD+F+C/V9w74bdqRXnF0umWnkvPnbDTcxnM1Uf5RtDN4XQJpepYjgfFHczeZflRhEP + 7hfQEwP/gQ9/+3Dm/+bM5Mn63MiKRx8b4UtLo+KeY8n0dfXZytc+Zu+oUpRKq9ESWqqrBj8Z752j + lXmrq3cE6EmJv2LjfyZ0/z/gHgIBPBmTAvOceVZV8q47M15xDmStszeW3zPJxsy45+HMGMPvaZkn + Q4+W6wsSv0c+tZ9KU/GvzowRM9/ED8lTm3Ev7N0zzMHn0Cr7O1QrmnxW3OMB7rj2L8u8IepZxvyk + vftX3DN57Nm3ZuWtmXEXa16lIZoqN11noId1j4fWvZBrD/sOnQ53kYnekWFv5Mwkz+Aq75FhZqc9 + +cAdNJVMf6ZlVAOyUm97xZj8gjutRS1ecvDRXIXOhXsM7JeIZx5KsOLMWO6gEZfcSpd7JHemSlcM + VYrANYCqhIvZSM9M75R3gSw8fZWzuPyZV0bXJHQu3L9YOa4+cYIg4A5Bj4t7q2h+Qi+DOwQBdwgC + 7hAE3CEIuEMQcIcg4A4BdwgC7hAE3IciD1xMOev4AIkukHIo8qk5HuaV3Ie77+P4HQZBQqfGXcbB + NJ5vx4TLgF7Ln2Rgb+LhkVP9cGxjC7qGL7gU0IlxDzLgNwQetjv1Sao8+rd6I7gnqQONI9Iw+cbo + rPJGS6yMoDHV8rZ6bxhDyGRlY+Q7DlcDow+dAfeUGyPKtrzxqPfGEWbYM9nibqgi8BTtKQcNRUB1 + hAfKJ5744YPJXs17x93JyslnnuLHE2DHJBAIurPvzohmzxaaTXFQMPv0vea9TP8gdI3UBwlakIhn + mcdBL3xfyLZ1d0dx50pifOJ9JN+afMLIYeg0uDeZlbfgbjR4DIckqKVmhtlGntcqt4LMdwPy72kJ + h+QovF7bOjPR+SR1wKfio4sjYAEE3R/3ybiojovgPqKIrZwZckt48ilXiRgax1DKLjvy3i3P/TMb + 3Mm3yQvu1OrNOcJ7h06De4zsYk8d9zR3sCju1Yu3kzruRrx4btO2MkVu6OYt7rxyW1l3WlQsrhF0 + CtyLRNjQjshJowmQPc7aVNWOSPq/lMDh9OqkUTpoiasScKxwtww1V2Vr8olCtT6SA1QZd3bavbcZ + UQmgc+AufkYNQd3uUicTmXbBXR8ztWjIg8m22ikuj51oE44xEwM/kipBeC68YaRPvLKhzXjvJoeM + oATQOXCHIOAOQcAdgoA7BAF3CALuEATcIQi4Q9CNcLfyKLUejlesmqnArZ//p838DR0aUIzVNWNf + s8nuSpalNutcKCOj422OuFjQvXDvwyDDIe4yrMBtpjLFTSIaL4PiebiAPF8dIwWClz1LkHjHg97b + 2E/VUfMQdBfcnRDLY9dlgtKcUMdoXTA9X0HrGZwEd9OWysIbUq1wMhxeLX+SCSCF98sDZjibU55M + kB/ybjKY4AfdzbrnkBX3zJQaMtA8MzWqCeaR7XnSlDa80OfiZ6zZjDuZ61c0U1O/RTjHYyAT3zXo + W3ZhciDPhmcAyizADPMO3Q13ppX+8qDdLPM2HKcey91Qi1/CfkjmQcDVxDCtvBFveY6fL7qm2Xs8 + Tl+tOEyMep1vJRB0H9x1Ip7PttI7x0N6nQxrV2C7kWa772TG9tr3JtDZrJdpm9ij4x51Lz1bmeI+ + AXforriTRXeMuwzdTaHmkqufW6tm6hNNvVXcV1acQSfXJpdO8xZ3bQVrckpYd+gsuHMePTHRTSZp + u2kJHKDxCLzMcOJEe0xy8GvcOVdfPcI9a7Uo40bBKzT+gKgE0L1wly5InjhNzkji7MDsuS9zj6R/ + hZNn8+zVrLiLf6/ISv9jFtzrDvdCzVqXm6HK4PJcH7Jv2eORGHQn3KM89clqpdnLSJl8jtndsLmb + aIkXwzmFHT8/4r4Wce3ZcresUfeWIlRZKQeqNk1e4rxC9gHT+KB74f7dWgKHBHod3C3mnkKvgzsE + AXcIAu4QBNwhCLhDEHCHIOAOAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIeAOQcAd + goA7BAF3CALuEATcIQi4QxBwh6Cz4/5AtcmYZzuiK6U2wP3LKi46zg9pQ51qkAQcNfSI7Mm5GKOz + Uwkh1wc5DzEE16joHK2YCs3/inOuyAmqckDOyBHZEx9Go/I3R6e+uTJFOiC6QJauTOBLZDlgMx9B + 4ti0dEBRr36MdKgGuL8jF1h2iiHKPz5roac+rUG/TH2dR5AcjyM4OPkfvSl0MJmXSbhtPdpAX7NO + HHmbTjn/c/THUfktX5XUpNS1HxfLTKG/MgJhDkQO3K+I2G6uYxFD0UU9s16zBHxN9H1Z6sC5lRhi + uvyNca9UZj4uKn/KcudKKQZnkzn9EXXcAzHsqKSVa2qLcjxOjouPx9G/TMdmrVz9RouDA+7v417o + 9B3jzmcwcMoNQqg+xom0bOmIjB3udoZbjsOEsx/RwD2Zfm24DvNx8G2LjsvwhSl0ofJyj6KaYMXB + 
Ae6vgrtYd9PUmXkC3IsRa+4mrr+Ku7uCuzpoL2Lev497FSSeBHcqq2a1fArcXXZN/Pc88esed9Ov + zWzdqz2LgyZJpMl31O6BOfdFi3X1QiuYX8c9EyHmaXCvat/I1JXiZtwT+++Ph3shYySWnRvX5QL3 + Xr2l40YOjXy4k3SgZcnOyHnUsw9+pNzlXNN5fuF0YL+PO/datKfBnbss3HxnH7g7N/phHgp3ttZU + SiqusdI0PcJ97jSLvNZJXHftIQpZE6fnnpqUIDeceZpuVl5ymvp7+O7xeXx3te+Fca92se7sBU8P + aN2NOuN0eypyh7rA3eRQbNWL74Ip58I9ZyXd6FMPyarLCdqTvFDT4w7Wvci5rU+DO9t3cXc3HZFt + ekDc5ZGBkwZWpAuxx52fPC2+e3+m0M6Fu/E+96dgmlnX5SSp0wMha++Ce3se3JMrDPNlR+T0mLhH + wZ0OwkkH674jMq5wp1pOa0dzLtwZJ+/jZMjgrHF3QfiHdf9HTybLvf9ZcLeCOx2V+jQz7tNlR6SR + 56+n6SDruCfNut44EXWSvOm5det+H9wd+bi5++6ulkIOIL+0BXfygY17mKeq1l4+Vd3hfv4jGrg3 + wT1Jq1tKHeenqsXoU9XI12w62UNV53vTVLtgyLzbZAy3T+nCsAsvy38f9+2YGRlJUpYhMor76UeY + 7I/nYszMHvd6Ijf3Ku5WEXdyGfiOm/p10IEEMvhnNWaGuypPg7vx5FBmsubRu5Ryx5rarZXaqfRS + vHTJ/zruNhax5ClafV9Mkhdt+KvhsC67B3k4bYqMfpQHGXxM/K8uzznos/QBn/2I6MTTMdhR3lpi + kpaJlJqPaaou82HVIteMTRN9jKcZx5c8yYojQxqnOntfuDtJXnho52/jDkE/VGPN/g3XSbN++Yed + 4/xCL1SXcAog4A5BwB2CgDsEAXcIAu4QBNwhCLhDEHCHvnuZEUUMehIdRBHLF1HE3CaKWHKxB0uz + 9DoipkXDr8414D5Obbp8t6jG3WxfK2cunff8tXb5bjlPZT+MSmMSpXPZi29EEatz3I0oYyX77CYT + H2g862/gnucJsu5iqqzxPJF8s3MfJ7I6/rxzI0KejyxfYOTHLPjlCO1Ets+XM+L+lShiHFKpysBt + J4ObNWKa5VHyI84YcN9Afok7m4lpO2Gcfolnk58X9/wO7oy62fE+NZ8mX0+J+xeiiFmd8FR5rpZO + aBlTWer0dPpH3CvZPMPTmTydxuqcz23GY7KRXBpaZDJ/jKl433LOPp8Xd54rJkFY+LCKFH7cmniM + uCyKLfPH2OjbxMcTz4f7l6KIddxbcHI/AO7XcLfe0W1+qjyf0NMC+SSeu/eODUWkF++jZbse2Txm + X+pJLTzHdPAl+awHwmWlgmvdjHQ8SRw4Kr4vPKnGW0uge1/LiSz8N6KIzbjbUKpMTPw93HvLR164 + Adgn1PQXaue1KM7iNnxYcvY+uIu7623xPH+QmFjmVaUsk1F4Efv4HCUk8p1fjedZceeKmLzh6e5U + cjb3w0+zfDxNis4HzQdDDZEmZj/kk+H+cRQxs0QRm3EvHKLcrXB3xf3sLKfom76YqQeXCTyTiTwG + mbzkG1kbbgGuwoc1vgPnbxvMf8E96D2ejF0Q3JkSxZ3tB/mCVAOCOgnZddzZfOR8WtyrVF+XZ9yj + 4K7H43URw0AQddzrqY7ns1HEzBJFbMY9ssefV7gvQaV+6IRrZ0YPCkaVje2JF/PINaCq+dF52hxF + TDdoBFL77g31m7jzbEEyfFLn6Ha+x13iPrGdLH6HOx/ACXGvedyBqOztAvcqZqh03PMO9/NZ9w+j + iE1LFLEZd0dfyddz1AVbf7LbuEcDi/qSgtw66exbni7MM7HdFDXURo8iRq6mTNqevo/QN3FPDG+m + n05MvTRax4vsMfhqmtdF3QHgLZzifj5npnrLVoOnvrN9UWcmLM6M99aIOx+0Gd6dmSIdkf5suH8p + ipgdoWi4G14W/5Lv3qOBGWUmZjnfRu02W3j9fVpgJKyShg9rurYPv4o7t934IYUTb4uKSmetv4xY + CbICL3Jq3XOhjSrnPnIndGacdMmw98h8cB9N5kLLAahjJiv0Y5kyuWXEjOcUTrLgZLh/IYrYCnd9 + GvV7TdUeL0YAppO44M6hN5KQxN0bqePew+eR6XH+d637/CS1XXsQYY6en5rzPrXoT1LbtceI5uj5 + 6emO550oYu5KFLGOOz97zVm89Xvgbri+DdwzR9wQQ8N3VIU8z7hz91j+bdyhM+obUcTI7lPjixu1 + USNKLT0zIyjcL+Beg3owVt3JSe+ZSfpCzNxAYtybdCc44A69F0XMXokiVntEe0ncYPm7Vc/Mz46Z + UWMtuGdJVyAx3LOvqdTGP+19q9FqFDHbNzCEuvtuGDHg/lQysUcRs/soYvZ6FDF6Y/uzHfpgRgSy + JSjcj1VOfWZttatLn2kbzduRpSHI3e7SudHDh8kGtscZA+7Qo0rjg5m2b0ttvh7Mfj+YGHCHXunu + h1MAAXcIAu7rdsbSaDf1o2kAqZ5+Ftj2OUHadUk0kpmuxqC90gFv+laX/if0ULhzrO0RZTv6a23l + qk8qxjPJMzeVvO+DkGQWVtZx7/xJHldzb4D0EVwZm3Q8bKbJVv0r4ytwe1TceRRbv5BOH4Qd8d7X + CDKI89S8E9WWDsFWm+gtj5QhuOlTH/DOY2pa9fHa3NR2ePNKvrRUx4EnWPeHxT3beXCszubJZTKO + Ews3xwN+mqkxx270ZY2iT7VL5VlBPNNpqpJ3u/IT4Zbvmy/FcKezHggfVQhjKq4b01VkqGqYyCkr + lSd8cpFlxhOvlKxMgqaTwl3W8k/6u3tqFT4ZPlnpEOfTI2tCD9ZUHYZv7CP4SozwDM7JW06MWXq6 + keizDDa13hU/BjTT/7HKQ7TA85zqva2/VYdEBlY375wUfQys1oPkEZ2VbmyRB3X4UKlWRJ3ZInMT + qvOGqgePatLnhYo7bdM8Ae4TH7gMIC3Og/cHw70niipjArYMWyaHXkatWSODlLvJrjo+so+o1Umh + U7X6qVvSe/u2Tr10TdfMPreZlmgL5LuHIA6clZuAj3LY/NS7CuuuD8o2MpVRx+/NuFN1TrTP1HT6 + kzp/YO+hcM864GFYd6Nj1lxoirtkwVx5KJZo8jLcbuDORp+tu7QJyTO4e1NOeNcJWn2kuxlxNSqZ + e1c13IbQGqMUufQ5UDk2+czZEJ3LKcdpa937pgQ+t2JC+I17GdoKtzslYfgwHYlu7zjbKw/jWeOu + d3brjQ63G7OEmO8YxgShZO7ZlGvRKOBSVrnR8LsyDnJ4bAN3Hkg+pdZ4roTMx43Jc19lo8PJzWUN + yDB8d5P63ctlx45OMq39zNirxiHA1GzYvMy9s860nED795uq3NnSuhcgXklj7pve3gtde3VtejcO + 
o9/nNukLOQbiFIwpcmYMj7vTaeizIkce2+5g5Y2jtsKdQxUkwpkdmD47m/cQstzFRjsk0aqNP8iI + bvpATlKdxpo/odSHN453nfcYTA1oLXwb9yb903mg7+UyFnFO2AnO7KcaibvR15Z5/Oy0yNQ3vvHz + 6jmrdZcv7to1Y7VkfRKkHF5aTYncW/fMg1B1i6gpzJlzOUZa2HrombnffeCu9Sf1NX8Gd1tlwqmG + Duvlp1tRCQ20fxd3vhun+TlkqzrLtxW5YdpijNE9j4eTqahtqYnH8RtqpfJs3NimZvSBZIp3vhpN + S9gfjxo9osscn3xgRj8n6WzUr2RZqePzWN3QWTKbdbSdU36qmZKkC1Uj4FX+ZEYleMooSedvzhR0 + SfygjnEPy2QO4P7LFyTipP827ja4Fs7pzFi9Rfa4YX2q8DqKmN4xN1HEWrQPgzv047jLlLs17o3n + ZJ8yUiG32aIEgOh9ttzY4Chi9TCKWLljFDHolLhHJ5BvfXcN33Y68ZzT6qXfixvxqc/UuxZFzN0r + ihh0Wtx7MLw17jXYFsoJi1uLxk1K2gMcnDyBPF8UMei0uFt9XLfCXWMNnJL3qT/pSBIL1cwYny2K + GHRW3PsVXeGuHs5Jn6pGjajR3fGO+/miiH28z0+E2Dpe0xwV7efrZfvprovRp28OFv4k7vpQ9Zy0 + z0ND1R1X3JcoYu0kUcTciMl9WGHFpmzGuJbNmvq0LyQt/tzwqBopSh7RNmqT6zirrIf4w0NmU9Df + vfQvl/tmH/LFESIPz6Xv4yCjHNTSNjQ8vJ3nf0m4oj4NTI8r+ps+TVbATZRkTIbjKFlnT/xQ1QkA + ToblxQHUB1HEmIPfjiKmt5Nw7EJpHGy/rQKbFTSQeumDEPt31Uvr3MlJ8GSRuDWeZMCNjLT5yQtG + ZqQZd5RYbPVczNRV6pqjfUiVdJ3qsFQDmQLgjZUgw0lMlh5X8TbdMtmN4p4kF5kGOOXgYPTRnfIh + U/QxlSIOjTIeZHjs+aKIZT8C+adMtqsVI/+S8GJ58B8Xv2U2hNa6IgN+y2przUzGcdJiL3v0XHvk + NkAEBBliw6OtpPPJTj+bxSz44TrWXuZCh+UkKWnl4bzG1mybRGau3OqjFyu3XLfMvQ5SRHVDfRlt + w+T55iyXj4NpRwkOzj3KbT0q7Ua1lqOATYbjg/XQYdalqc8gO58kYJgETe8TgSS68gmjiGUXfR8Z + lXmyqmCbxhBCupReupMymzq+ixc2z7P76KsESpeu1DZMqp0N6fBueFpQS6M1/pNZEOaB7QwrP+TI + nnOm0T9+xuGk45eOQo7QBTFG9MKrxcWKt+Ckvvbx/GM4ZUpmXB4/WmJTkuPSGQAe/QWmvd+SuncU + sSyd//q3Z7rg52Pjxqx2TgcDivniiavLAWj9EMTWifiGx7NQ0W9g9V/6Wj/nywx7wRnHQpbfltlL + tclMpMqHlnjkeuPpiVZvOUluUm7eCT/PydH1Efxz9R57n+d/2bmXoc5xcKHfqFbfx50uJ11Yz1HB + yekIhic2jCsnT3uVZ011VDeRN+gLcr/k67qqBWOu93yLWHA3P4373PqMgb2RnmFnCtVyaajNJ+Pb + tYjBcs4peql9CtPYieH6Tx4cH4PLq3pgVwcUfZnWuMO6PwTu3FzllEuJnVgTsmt5Nz5ccU+SDMOv + m2SS4kUffNg97m09J6SvICv9aAok/VEqaeC8qXlJwqS4+z3u+kJHVesyDpoao7nIts6TUz9PDlHc + +/yv0fWmuMd9Kx46K+7isnhN7CKuzDLdVHEPfp3Siy1+98g05Zdc9711N3Nsmr5QsXDfHyfxKUly + 1OadxlFYcGdnxopfIrg3/SQTzyv3sSQJOaIPMpu49mVMbRohSTruHf44XBztTfanSu0E3N/DPcnM + nSCD2oq038xMjybg8332skZv8aPnUjiIwu8ed7plUMM86XQpN7Co/qeTbTuv86W1jbrkHJOgGVLb + lHrH3/P0RJlL7nTI3pi0leQUBJf0oLJfcLd8Ltg88Mv8yMTwr8KXOT3ubYxInkyUfNI8s3qZb2z0 + nX7H6/JXpo7MXnr/14k+qxnZvFKylWSMpdeS5l0l99OTcVrUpLlUZPpJKUrjMpomv83llLJal3gK + lpHEe/SpTCPAyDgFqTX1V0biKvmiHxC/cMiRfuAmRtB+fty/6zM8yQwyznq/3Ms0ZhgE3O/7cz/p + 66/zFsFAA3cIAu4QBNyh27S3STx+yUmE5TQVDismrWEJMCa59pLj9nWVb5yx/OoScIceTjp5L3NG + VR3ynmWBBC2Ud9yFVGRJ7NE4yjzfD7hDD4e7tRx4g3GPgYe681NvM77iIcGSIDhNyZaQreVwY8nW + BtyhR8TdkPUuZsHdzl/x0z4euJGDhhSzYuvjWSexAnfoBrib4BRx4A49P+4pRAX93riPEGH67JGf + R1sNH4YoYtB3cXdFu11m3G2obYM7rXGPlmoPGN16dkMZR6pxl+coYjwRLBhZBVHEoM/gLjNW17gX + acmaBfceEf63bbvE99chtRLx3iadDocoYtD3cY+2tA3uMSQNIzlbd1vu0O0uCVNCNl6jo6cgo1Cb + bxVRxKDb+e45NF18/6YqUU2FkAgrUecY9IR0iCIG3QT3Os0h9O6Oe5M8PlnHlzqZTqO4uzmKGIN9 + 8ihi0H1xj+/iTl/lLN76vXG3OnVfQ4Rxg2Lgvo4iFo6iiEU4M5DiXmqQtqht7KCT715L6U9Vawmh + Jc1/6Fa+O63R7kH7PFc559STv/HHE0YRg86Kuw6Mif31csxMYcMudwHNE+7uNGbG+NB4epfnGFMp + SmA1CSvnJIqYGVHESjpBFDHolGqFJKH9Ss7cr1EjL9GHM/yVhhWbTKEv9RmO5eV3SARXJURYlNiZ + kee3c48697JfRhEzc7zQcKcoYhB0szra+uPSdTCx96KIpV+PIgZBDyjgDgF3CALuEATcIQi4QxBw + hyDgDkHAHYKAOwQBdwgC7hAE3CHgDkHAHYKAOwQBdwgC7hAE3CEIuEMQcIcg4A5BwB2CgDsE3CEI + uEMQcIcg4A5BwB2CgDsEAXcIAu4QBNwhCLhDEHCHgPsX9ffPGwTdUeZ3cYege+o/ODMQBNwhCLhD + wB2CgDsEAXcIAu4QBNwh6KlwT7bxPln0V3/g7hVKSjOKxe+aTdvSzgXdrTnWNuM4zNkv5Sh0fzMf + 8vowV9dFz8Rqrd+/NAsj+rf1izIOaD6qm5TxlkdpXAihTCWEzG9yoILHYO9MQePCZCoFly4EQ0UK + wc2lbbzQpcnQV0beyyLaqDZerfCHxi92qrT+aeWogDZEes18jG3KITk5EDfOgmtT6odrx7E5009M + uwPtmc8qF6PK36IXKU4Ts8NnO3K5SrBRDiSfCvcYMpW2VTl7wTo6CHOX07jFXYuTpiyvdKGDC3om + 
ubRGv56mgTvBoIsI98x4uExoFL4KD4C7Y2As/UmTXAKtwOMs5HE2aM0sZyJLBbkT7kkMDl+JKKBH + Ps2O4VfKpWiVTWbsxT8T7pnOc+SS07k0Rt4mOdf3xj1JWbh4jW0Zc+uYBjqjla16CkIH494aL5c/ + VPhMa4i1py2Ej8fAPdLx8gElvoUVucZNa3Ybh8t3Lr1AeRK7dA+rVNWUR6mRYoMyFaXItWLc1Sbx + VZqksKdyZgyf36K4M+RsQE5gD/lCUzmi8CBYFLrIxagB4VetqIq7mfpNKekB8DnnxXxlzKPg7mbc + 5Wjms0CrjMNV3MWXM+5eDifZEiYlit0Oirv6WSvcg+JebnLufwj3rJ8bn/aT4R71hiNG3/J1/wD3 + 2HGnddJj4J6DKQe4mxXuaeBunJvuhjsVbjSmxJEsciG2uGf2jgX3eG7c2Y7mPJ0Ndyv+ouKu/uM0 + Lc7Mddzpbvog1p183fKudaf3qeM+tryLCGXmmgwQO5HuAHc+mPoguHfX7Dy4B3HJo/C+wr24znT/ + Zw9xry6Xx8DdhniIOx+V6YdrN7jzresOHTPSbpD2XrBU6EPc6VN5DNyb4nU23JX3Fe6hE957ZvJ0 + iHuqYn8eAPdGHvEB7queGbvHPfTVfrljhsvAPS9EdhFjcol7Y9v/ELjzeTRnwr1yz4zy3pQGw7jX + uO6IdO0K7k27zc6Pu/SnH+Gu/e5yuLMzY6bVifntjpki55eQz865Y9yNDeFBcC/374a89N1H7wyX + tkpH5CSdkr0jcjmUHe52ehjc0yHum6bK0jOT7+a707k1UbuR+IZbj627eRjc6wlc94OOSH486q50 + RF7iXmbc3YPgbqZD3NuM+0k6Il13sKihwQ+t7UVHZJTHZfmcuE8H1v0suJf+mEm6Ayqf0V45P8Y9 + 9sdMtT/mPi/udfjuDNKncNdjuxfumZrUUfrVG3ksyXbrXgX31K07PyI8J+7UQqpStLPh7uTBeqZX + x8/oXHViOFzN0lXxHu5WfN0sjzrSqXGPclxy/+eK+T7u4szwxXLaU08npt3hwhh5OFO4X5RvoKVf + lNJR0sEQ9aS4a6vfmdP57mPYkQ4RyzpCRAdLjetfr+A++nOkMyyfGXdpZ2cjfkD6CPea5mNr9xoz + o5Q4eUhg5AYa9WJlMy6O4N7CGERwLtwnU2ti42L6CFsdzHlvDBKJy5FEZFBqLDKMpMRqtKxc0JR4 + lX4+kuVBp3IYNpbGa5iTHM/1A5Xj0aLzn6RFNuMs6Cjbfrjyymei8cp6Yn67vHI2qYBcRjr7cnb7 + QfDFsVMvfrrdub/5QZqGGSPQaU0CTgEE3CEIuEMQcIcg4A5BwB2CgDsEnRT3f09n8IaI/NCjpDMg + 3N/e3v5oyppd4pp9HpvDvDZ/9l/82bz5c2WT7XZ/Rgn2m78thTsUf/dnV5TVnv/sC/rnsFR/rh3c + ssPLc/DnWuH+7Av4ztnb7efPta/nku+K++fyFP1Zvv5z5ZL+uTyJ27X/HBXl+pnYF/7P27uXfvvb + yN4BQcAdgoA7BNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIeh+uJvcPr+qZx2tn67sJGXzwS4v + cii4dwPe9G9dfN7rzLnYJhs8H2n0vs4nn89l05c7K3LZkhaxv0xaWi6787eIdfYjR2mO8T1U8zlG + F4/K4a8cn/1g7+1ywxze26BHgfL5eWnnU9K8S0RR9Db2M+RD82F+ua8qn37jc6LX/qK0W+cT0U4v + 7ZS4J/8l3PXMt1Q5TQ9bIZMa81+GySmZPrXUMltfE2PVvacUK30Uu9VcrrykSGIOXyPXH1nBOke2 + nrOIFHqtecQBs7I33S5nm/tKNkvNW9Z7ilu4FwuZPZ0EP7Hd7PkmPJ1X36y82HtXSObbeS4VMa6F + 4xoZpbT84vMpcbe1fR13umeFFnymi2F9yGRt6PaV9FrRO678mY+XzorXE0GvTj5G/dL1U8Zb0DtZ + IUb+xjLJmbaiPcxGg1ex/G2kX6KXyitRFQu0nqOf9M+DeyoTuy/MOJkCPuFRjo7hom9i6FjdU0Ws + TRYLn/RF7JjnyH/ZyEs4Je5s37+AO6sSaEnMD1Vq44vUAq0INfP+TKJPVut97PXeq5PCS6Ncu+y7 + MXNiCvgGzbafLnPOvFHhDX2RVTiOdVptxy9OqMhiS0xs0xNp4E4HzkakKO7MTyiK+93DeQ7c6bor + 7lVISnzPF6Ccfwbcna2lCcdyyn0xvDUdbLf7xpHtZaj5xpvFQsneOWQm3Q7Eeicx/T3xmVgEqggS + RLOK3SdjLWRn+TTfKVfbNW+ya7ICVyrvpumJcY8Phnt7Hty7MxP6DXXgPqw7ORyRzHL1iuQKd3Hn + aiSfnFyTQtfuAHei2YonE2mjzE69hNbkvU3stOTK2zndd0y+UNOAo4QS9OkZcaebJJ9Wt3ZmxNT7 + chbcZ6OWBh58v00f9Tc8Hu7sVFjf2gb3oB6OWncTpbllhnXnM5CceC+0ccfd9HYNJ5jj06T2m/Yj + 11Wbtnz5qQpE3W5xZtivjRM3XJ/LvvPxRj1Mpl56QcR00nllsKpvZ8CdL23UC8V10WhJuY2Wb3JF + fgr39HnctQ9Yjs4HJ03VJrcyNbDUDHXZJ8U9Tdxw1UvjxSbTFmy8Hfsgs3XXlqa48PxN5mphpC3r + ejs3ekf8u76dNlVpvcgLLK8X/FM9gRPjHejwK10caY5Xud05tRTuBJVbWl5clDJKOgkMWQ1evkXn + wQ89Zqrmq6s2q2567YvIpWj6uMhmZzgguX5Bq/RY5BJ9Xbcg767QCkkrmUmJNqEVmnxvadfJcodR + 79PUbl5ZZWxX2ObzDsnzadL36Z7reTN7aHIypMHCB9c4yH3RDteY6/2LmDSpebZLSbnvWEsrFJwV + dwg6pYA7BNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIQi4QxBwh4A7BAF3CALuEATcIQi4QxBw + hyDgDkHAHYKAOwTdB3djvxCBq3ERTPt6QWp9Z4dSjIbru78ycrJZ/Vxp2JNWT2P2WtXwznYuUYfJ + ppPinnz4fGziJqt+Kg5T3KyTr/3GHP7NevC+kwYBJuk5qhoJu7+cQZXYkYBnI2a6CRKvs7+cEfcv + BThrcqY/hbtfm3Pjr0UCAu5Xz3XgKIHRtyaBMiW6IEdv6y9nKCEVJXqOZDmF0K2aBJrrLyfEvbk2 + jdjTn7kEHBONcefMKUljVxOwTsyN86FO/RvXo5RqapWsyUtsjEHifunnlkOks0IvtN3A3UnmkxJ9 + scX5Qnsrc+KWF7PtHPw1z3dFiQPsbfXvWY/fxb1IJFAOTdzjjI4Q5HVd8NM1Vb9g3dmd4WPzIWUN + 4O59j5SafXP6jeOgqJLNw/hMqJris4byzSlI6oKka9rMgSQzbzAiBfMFjZz4gBZGuk1G+a6kJ05O + 
c3ytmzgzYc7poBGAyynSGcyFjIROeKyA19Pnw1c2Rs8aryHXOdYrcR16AHLeTetZCHKPCKzXyI1k + BxoLXoMGZ9vtVJXkD4q74bYPpzvpAYPF1ap8u3zBpmyQoNJGG0wni+/eeRgB9x8nnYH5QrDWJnmm + kpc8MZw5hk6/yzW70U5hZ06/UcDllpYH/EWuWOQ4scGXfv3IU6ENWrfu7Apx9Si6rWRBKNwYyuYl + cZ/dmFNadynKDnd3auvevuIlSNR6r/bcTK3RseVE/rjVbrNWfCnyjRm4B71sHXe9YlZW6dfPWh/J + rxnWnf1+rh4r3KU1a58spvUncW8cA1lxP5/vbp0UcvHdzfl9d3a9bfsK7ok20YxM/I9b5mHdx8OJ + lDj+etT+FnJW2D/vzkwRZ0diuUf+v/imCZyS4s5uC7vpEjY8D2fGjlRNryWf538tji4Zc56eGUtX + ybKhStIzU+zokgmn7Zkh19v7LzRVk9iZJhHr87Yf02oPftWXrMcbJdmeGbhn6ahd1uR0BPTehWrU + mZGMTOQPdvvAblKokoSvvhrtcvBpnKo2erkLvZyk373IxeVeOGmNOfFns+bKPGe/uzGmtc8/JTXz + X1PT/HZ8We20fJN65gONzd9Pj+l9yLpKfz5oOfnB2EviLAi6W7PsPtUX7JU30/Y892eYJ3yqWsf1 + 6o9Te6FP2hH5W4p+gqBXwT1FXEPoZXCHIOAOQcAdAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CALu + EATcIQi4QxBwh4A7BAF3CALuEATcIQi4Q9CT4V7z5ydMm9KufprVck7X8xdAX7w8MY5Q+SXX1csp + 1CRWHhWxFyllZ5aXE+LuvPt82LxtZPfjSFHJZ47rmwNQvYGS12BK/UpxpFh9OYc4njP9GRkxrM8c + cbS/nBF3H78QYbMN3E1q/MlqXF75NJLYZIlozTHANLASr5KQquC71igs4FviyPAlqCdJ/cB1cQXF + Ejcx3yRI8Y+ERDVfiLA5jqwHxvNB0gxwDDzHPozsh4NJEeuOa7glVyl4m18ygO9NlHONpht35qn2 + sLKnKFx0XJrqq6az0Dj/HBI1Tbe4u/8MMu4ruZnScksohLs4NMQ6hz71uRvx6r2hvXL0x4lzE3C8 + TA9X/ns3X7YabbbzJ4vvLjGdI3uvcfZ1z57OoLovW/fJkJUpk1ZmDsHMYffne5qNQYPcF8adz4dL + APebPNnuIJ8Xd1v4NU2Pkc5g+kpupo505Ni9ddLo9XIBXGgrhzJ547LEhJQMBpwsDsD/A1NmcWbO + lc6gDHLEvWoKhDmvMyMl/CrumnQmiocSJOmMpCxQ3DVM84w7fVnSVxLiQOsTHubLw/mByKTw2W5n + iXgtl1cvuB1OLpMQtdV6xp4Zn+IXnJnsXE7Jl+YkYVhynKtAcpAN68458zhjTxDco/TT2PR6mfNu + enk4oYOnGyi3gvTlHNJcFJooLnCmIhsln5F1N+g8+gncTZb0kJ9c2eWcJQVHKKVMMUpkfSN5I9t4 + slAklj3noLROE5o4D9q/72hKAojQes7O8XIOyeUtksNgynHkGuWXG9x/0JkHvZCAOwTcIQi4QxBw + hyDgDkHAHYKAOwQBdwgC7hAE3CEIuEMQcIeAOwQBdwgC7hAE3CEIuEMQcIcg4A5BwB2CgDsEAXcI + Au4QcIcg4A5BwB2CgDsEAXcIAu4QBNwhCLhDEHCHIOAOTVNrz4KoAe7Q+0re+1WOsawphOy8pM05 + hSRB2X3lRlb4JnkYR+Ix5yUFn2bYAe7Q9Yvrc2t5lXBdk0mGOYPdkqn0/pn3YuebE0ZPPldXehFb + pbJlb8oNMosC9ydWleypZNMLvdY2hRhzYpNPfzItNoENZ8y08O64Z00+Rq+hcVbRdRXlJPFlXU2B + O3QgcgDa8A/IFwice90w7pzek3OUE0zBx0zf3xv3KknRpxg4QXbxIxdgTyPcTp41GzpFQ5VsJmGe + Ou5sP3PgJL38rniCS5N5ckLbu6cRZtybJtOVRLpeWx8PkSQeOols9tYM3KtAnjU5ufGpcPJgFzJ/ + dwrcqRrWXpImTdOOO6w79Mm+jqywEEUMUFzh3gj35kNsoZwDd+N98OxrNS5ekda2Vd/dyo0JuENX + VcRAesewVDKTXu1noAVBjCX9zznZmz8J7pNpRvthuF6aKRktcxwvwB16x7x77b/O3EYl6+5zIIik + kSoejKUGoefvTuG7q/HmdnWjQnFVpZqa6F2Y5OXfO2aA+5O3VaMTjItrzUzNxMgXvNTJRCd9NoXf + GZPYkN67rNqtbrgkVCj+ZKmMzYlRb70fHrhDEHCHIOAOAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7 + BAF3CALuEATcIeAOQcAdgoA7BAF3CALuEATcIQi4QxBwhyDg/opKzq2CPtuLVAWm1mrp39FVrXEs + tZE3NCmlxrvkPdazZT1YSmuKxnUvxQD3l1Lk7ARu/ngZVct41QG82YceOSn7zPHoOFpqnkzIofXI + 2CcSl1YrdtPAUObKYQH357XtzGtdklj4cnA5eaWDi8rhRLNXkiQuKocNM21ygYPV5XiuI3VU/bS0 + UwhSr/mTR3z3V1IV81YI0ywWj3BvwQcrZn+g0PMSOM8B0kMh81171WiT6SHEOK50Co3DdTli3aVw + siP1uUd2J6teNSRqIlQNcH8l0S0+MgPem+Zjj9gePRlr+uxWtwBasxn6jmyiGkkTXFgZx8yhUDOn + bWr00s5m3CVPR1oHvE7Brd044P4a9j2IGSezbCSuL9l7Q7d6u4pnqriz4eYQo1RBTHfqk5l5d8RQ + yewvmMnmVjPnxzjXfczZvMGdjqPdgHfg/jiSpJGJoCUXRoP2Fp9DyIWdmdDWuJusEXNtR0SCpY8G + aR61Q0OoT8HG7M7lz1Qfqnhhc7KahHQGL+fLyN/McaA1xD83OafEwX2N92vcGXUmOPjewcG9OB33 + LC8cUFcTZERJFuDTiY7UJq6pZrTHezoD4P5a1t2H1CLnnuHsBFl8d0dYpLh0ZCjuhuhO3i++O+dr + FEe/0LuaqLnK6ceCfCdZbIw/kzfDPTNSY7mck9UEBw3pDF5LkhU4SpJdlx1nkLQ+SCZGH4Yzo/mA + yRnIzrH1Nh0S53vujiz5YBr3u8s2kW4CJYRztVZ72km5jckh8wuaqq8mo1C3xfPQNARt74poUgCz + usQX6eL7giRpDerJDnRV2v72JunugTv0SuYCpwAC7hAE3CEIuEMQcIcg4A5BwB2CgDsEAXcIAu4Q + BNwhCLhDwB2CgDsEAXcIAu4QBNwhCLhDr4adc8Ad+oELXGIhRTtNxWXHL7qgTi1GCUpW3a/PVC0h + jN+0rk1cFCqSRCbm9yNKVHI5SyhgflOAO/SRWlDFKcqrnbIuyJMNGoDAhV82tcZxAdwoX+LwaKLU + 
q4Jbl50+pH4IwB364ALHSHBFlwz9TY4oL7zAxcJcCVi/jjvH+Yhc86bmwsCdbjVO4s7nGewYspX7 + AK1l4xxbBLhD79LFMVvIlhvGqsnbJDQF+ebXcZcfdAy1DQP3uQh1seMuFP2TJZRxqMAd+gzupuNu + 17jnoBbzPrgXpVfLUhZPxR3gHviNJh0B7tC3cCdzqW9+HffYvfQN7mTgJXZxyO7Qut8kqB9wf1nc + U3DkJ9c74E4tz2w2uGszmssWQ4lHuKuvD9yhb+JuifIOlPv1Es3tzhl3botW7oxpR7iLkL0D+jbu + RTon3T1wF97TCnenbVHuLXLTkTPjTLmFeQfuL4t77HTdA3cugvoz/U5T2iQeeiamHXpmoJvj7gif + ygt//zFTky7RVc+MiBoSKeQJuEO3x92E0Ty8Q0dkkW7Qte9uBXeqBHl54grcoVvhPkYXrJyH38M9 + zjZ8udN03PsIhw3uAbhDX8Y9cRNxvLUhN9PYbpJ1N639YrKaGHKq8uhose6R61+dbEqJyzMl26RG + SAOW7wQJTVXo87gTN9mpm9DJd93QurD4D7+itOpXtGPMjMujc1LsfpaGtCxN3E/Jb9ARCX0Wdx2f + IvlTBTF1JqJ0/P0y7lMidl2bVk1Vec5Upy3uWrIyhlDiMRP0mWuclCxTi9VsTZyaUnLEy3dN9ZtF + aiHPvTRNypRKHSVoXF4t3rzULl8Dd+jhquB9wAPu0CvVMpwCCLhDEHCHIOAOQcAdgoA7BAF3CALu + EATcIQi4QxBwhyDgDgF3CALuEATcIQi4QxBwhyDgDkHAHYKAOwQBdwgC7hAE3CHgDkHAHYKAOwQB + dwgC7hAE3CEIuEMQcIcg4A5BwB2CgDsE3CEIuEMQcIcg4A5BwB2CgDsEAXcIAu4QBNyhe6pGA9yh + 11CLORTgDr2Givc+G+AOvYJsJtwjrDv0Eq4M0x4ScIdeQZFo9w5NVeglmqlMe24TcIeeXqYEdmXq + BNyh51cKD+jKAHfoe7RzM9V7aafaBNyhp3bcndBemB6bszXAHXpeSTO1P2Fyj9RgBe7Ql1X8ynFn + t8bekkgD3KETWUh5mupzd9n5Q/0Kze1d5kyMDbhDJ2umhmHR3ddwb7W8ey9oOi6hNeAOnaGZqrTP + 3TGM+2eHRbaoN4bS3nOUqE1gXPiRBgFwh75Gu3bKLOPc46dxb9VJbz1VlusDy2zg/s3UbxnG1gTc + obs57kq7W0zvp3HXYQdd6b0fKNIY5qZwy+GmE0iAO/QFt90qsnHrfXzKd7duRfs744bTwD3Org1w + h+7iyORwMTCsfg537c7xITv3QV9ODHWNewDu0F1U1TLnNH0Z9+7yh2ibqfndB1OtJNlp0Z2HBtyh + exj37A/GhX3OmdEnU9l2L95+wGTtz67s13o5gTt0M1S6375rZX6q391kNe2Km0kfYVd7azZ9oZcT + uEO3dGXC0YSOFj6Du6z1hT6WGfcA3KF7eDJq2xdPOtk2+/OxfWCv05XOmJbex70Bd+gutOsTomHc + U3S5mMmoP5+jK+0j3LfYmtRo85irubZ+6s0F2c404A79mkaneR88YHoUgt569Wuaj4a7XFpp65yV + luhhq7WtcJe7QqoGuEO/oyRP/3MJA3ftaCnrB6VjzJhxwV24KIyt2yyJfKeI2we0W9zrgrup+WCn + wB36AZkktp38jtyNcZ+7l9nDyWH7oNSIId8782XfXe94Yneeq8k8yl2HB7d+uzBaTeT3bjExFrhD + HyEiVHryPdgk17VrI5Vg+DPdrW8yz8lt4RZvJq8X8FZ58YKqHW2ENHYSF9z19yxwh37ekRFXRXyJ + ohCmvKLdHLj1gcx+3tBp9t3zS41hq11Crx5RbwJcOxbck/c3insA3KF3e2Ssuu1VnJPU4VwcdmpB + xt2wLwY5XDjlXEXWffarfTSx9QIz483d8+wRuV5LnPj54SYB+oA79J6Y5RBynf1yRnaxzPxpxr2P + XYyHox7NrllaF3/IakeP1fok++xOjOLOH0q9yXAC4A5dl5r2UAYlBF7QuRfZzbirnV6sr9331Szm + ffVg1fpVF6YZgyRtrzVq1fsrb5n4BvDvY4GBO3QNjcat0JBdbWuP24r5rnWHO3fSqPVNx4PaDQ9C + WKLspf7YSrwWwb3scM+pNXGBoqAfb+HNAHfommmXB6lx06NYmErxwtNwRBT3nIb7vcJ9a421TWtX + XTV8A2CujRmR4nnbYdXpDuK04SAVKd2ibwa4Q9c7ZLLbAZZ4fMzcW6IOdlWy67C+yV8+aZ37bOZl + I8gkN0ONsM+VI+kAnLqe5+e7oxP2T6qAO3Qz255DiHY/8ou4lI72SE1P+jowo4K7k86UuvLK86Wz + Le57D7Gn7Dudr9T8Cvcc3fzoaj219RbOO3CHjq17rQehH7mt2r10U5uRaUlWuZ3G8JZumWVa6+7m + UGdT3bt3os5XSmvcDxR6p/8/O+/AHbpCxhEa0oTkFmeU7kLHnea2t0rj0spkQk06GB2fQu+s18eq + RD7XgGZHs/cK7mLVbzGzCbhDX5Ha50BNSO5TlI6a1C22Vetr8iA0HvBZ9KGV6bjroMhahseywT3k + 3L0aNxq3EbhDv+njrJ8OMdBFLbbVJ6FWh7so7kdzOnrWj2IUd+3Ed244OWP32o5NLblVj+YNnHfg + Dn0d99C7WCKT2Abuefg0w/9w+2GQcn9Q3qXZSq5QEjM+oFavxpU6QvHFVQ/PDUJrA3foK2ph3Xws + M+5p6kPYxwquez7OmFUrgGcw6bBfiRbZbwGLh17FtW/N2CXyxtzitf/e8w7coS/hnufBYdPKussU + VgkOM8bT5LFysdUurgx77TavBkO2De5xtEvHNCa7Hmv57847cIe+IjuMd+ruRZyHdemY9plkaZiq + 56O4tup6PKUUlpVMWPXSmzzjHpZRBWOgvPl35x24Q98w7lkmHXGfY53Hucx9MnluXY7VudOyFaZV + R5uV1aCavPTSqyekVadb9zpXnekWw2aAO/QFuc3oACNDFeuYecHd8MxwWZ4lla1jMwYAjykhdeCe + xVlPaX6E2tu942HUNO+tAHfol1TXM5DEKSGnPc72V6e06kOjtOI6qr8else0aZm3l5cxCNHNoWzk + eda4PQzfP/VRxob8IgvcoZ+VyZun+v05qptx1xBL0wp3HnrDnZEXc/fKFveqc/4Wmy97Tb1+zQ6M + ThExNeZvPl8F7tDn26lzn7sY3D7lKPrN0F+rjA5EU3F1zNde7SqFNe5k0I3bDhngvcY+7XuJXDYe + 6X63RxK4Q5+WgB1H0GtTlLoWBnxNx+jaTe4m6XYv81CZjfM+vHOuLX2/oy7V5fHTEuF9ub18M6cH + cIc+rTw8czbrqY4JHWUklDFRJuyx5d5FDdAx8ZEfIJFaK8tYGPLq5dGrjhsu+XJsZLxsPHw3GSVw + 
hz4rHaPF7VN5JDrPTjVzfMgWJRZSCcFebjrGB9Qa87pzvmq0d7XcKXbzPoLJ582U15Ea6rvYAnfo + K657ZXJD6g+K4h4iHS9g7L533CzTkwJpPUlpGoMM5LahTsxw3rl2ubJm1JScXfx25ztwhz4rp21K + iUYQmdmcPk1Vc/sJSrQDt5vLGrJdYv4m+h3XOFLwdkfWpu+PEwPu0Bd8GX5gVCQyTC35S0FKySxv + gA+x7I106yNqtBaY6m6fLh64Q59UWoJjcFBeanB+CR5av7plSE3bjJRcuUJmBMw25vZwAnfokyr9 + gafdBTf9AmypRpdZJd3nGIA79ElSXMdchw58fz8t3Q864A590nWfMxLkWwQnBe7Q2V137Xh0Nwm1 + DtyhB2ipihNfgTv01KrzWK367wEwgDt0bsV5rFYL/x6sEbhDZ8d9DOHNN4i0DtyhM8stc0bdP/VE + Anfo/KC4xWOPD9sTCdyhz4GSl2nR9WF7IoE79Cm1Fe72XwMCAHfo5LiHhXF6H4E79CK4c75J4A69 + CO48aqYBd+g1fPfHHTUD3KHPgZJXQwcicIdeB/fyqF0zwB36nFZPVbkn0gF36IkVV4i3fwj1Atyh + B1BZRXk04UEHiQF36HOyq9ik5lF7IoE79DmlsExiMg64Q0/vzcRVu7UCd+iZzfvKeY8P2hMJ3KEv + eDNpwT0Cd+iZtTLpBbhDT662dD/WB33OBNyhr5h3O1t34A49t+zcWHUP+lgVuEOfhyWPhBsZ1h16 + etWewt0E4A69hHkvij16ZqDnN++SA7tJxnfgDj27eefQeTHvc2ADd+gplbKmiQ+PGfIauENfc2ey + 5s0zwB16EfvuHUKiQq/hvqdaa0LAawgC7hAE3CEIuEMQcIcg4A5BwB2CgDsE3HEKIOAOQcAdgoA7 + BAF3CALuEATcIQi4QxBwhyDgDkHAHYKAOwTcIQi4QxBwhyDgDkHAHYKAOwQBdwgC7hAE3CEIuEMQ + cIeAOwQBdwgC7hAE3CEIuEMQcIcg4A5BwB2CgDsEAXcIAu4QcIcg4A5BwB2CgDsEAXcIAu4QBNwh + CLhDEHCHIOAOQcAdAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIQi4QxBwh4A7BAF3CALu + EATcIQi4QxBwhyDgDkHAHYKAOwQBdwgC7hBwhyDgDkHAHYKAOwQBdwgC7hAE3CEIuEMQcIcg4A5B + wB0C7hAE3CEIuEMQcIcg4A5BwB2CgDsEAXcIAu4QBNwhCLhDwB2CgDsEAXcIAu4QBNwhCLhDEHCH + IOAOQcAdgoA7BAF3CLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIQi4QxBwhyDgDkHAHQLuEATcIQi4 + QxBwhyDgDkHAHYKAOwQBdwgC7hAE3CEIuEPAHYKAOwQBdwgC7hAE3CEIuEMQcIcg4A5BwB2CgDsE + AXcIuEMQcIcg4A5BwB2CgDsEAXcIAu4QBNwhCLhDEHCHIOAOAXcIAu4QBNwhCLhDEHCHIOAOQcAd + goA7BAF3CALuEATcIQi4Q8AdgoA7BAF3CALuEATcIQi4QxBwhyDgDkHAHYKAOwQBdwi4QxBwhyDg + DkHAHYKAOwQBdwgC7hAE3CEIuEMQcIcg4A4BdwgC7hAE3CEIuEMQcIcOVBLOAXB/GYWCcwDcgTsE + 3IE7BNyfAfe/ov+Mvkz/8Qtdmv/4Q//79+3t77hospK+fXt747f8rW7Af2QNXoVedWf0gfdodC+G + PxtZV7cD7tAv4v5H9Z++TG/6iV/fJv0ri95kZV1LL19f8W9f1Lcyusobr2r6vif+St79lT3wtvN2 + wB36TevONL/9JQzf2Py+0Xum9e/A/S+x+Zc+yeXiz93S09v/aGXCmLf9OwnMijvv0tAC0/fNb2nx + f7QXxX2SKvAm2wF36Fd99zeG7r9uafnD3258xWT//U/f/KcW/b/1Vv8Jskbe/9EbgZHVdYHp+xby + 6Vva3Bi5C8h2//VvgTv0q7i/HeNuBu6GvHAzcDf/GXWCxDNZ0S2u0Gdwp+/eZFf//f0PuEMnwf0/ + 9VCG4z6pkf+rH3lF8/e/lXV/Y3flPdynjjs79eu9Anfo93H/y/0lb+KBs+9O9Jo/b9pUHT7H3+GY + D5CnFe5/2cf/2Lq/0TKtT2/AHbob7n8UVH1l3MmPeVOjPPP+p3ewHOH+H5P8Me5/edd/eq8PcIfu + hPvb29vA3RDuf9UOv/VemFV3zBXrbnjbT1h303sg316pYwa4n9Z3fxNfg72Wt+6ojx7yd313bd5+ + jPs0Otx7xQHu0P2bqtxNzu7MxM9aVx2Rcw0YPTMz7v99EndxY7ihgH536GS48yOmTb/7yrprv/tC + 9ydx//tn1V8P3KG74P4faY27LNLHTGbGXftt+lZ/e//5n073W398+p8u3OOuexLsl+2AO3Svnpm3 + TrHgPij9s4xuefszj5/52/toRttW27djzIzUENP3vYyZUeSlcTC2A+7Qr+L+VwcRvL2pZRcfpeNO + b3VUV++Z+W9pvQrvPMTxTb99I3S7t8LGm8246fum78T95yW8b/GHxnbAHfpV3D9xqQ6vVR9NcOXb + j/diXooA4P4wuEPAHbhDwB24Q8AduAN3CLgDdwi4A3cIuAN3CLgDdwi4A3cIuAN34A4Bd+AOAXfg + DgF34A4Bd+AOAXfgDtwh4A7cIeAO3CHgDtwh4A7cIeAO3IE7BNyBOwTcgTsE3IE7BNyBOwTcgTtw + h4A7cMdZAO640LdTq9FlUnCu2MtCNZtS6+9LCO6mxbb0qw24A/ffknVho1w2+NWsSyMtbfI+3vDH + I+/QAXfg/kuw5xXojiSfyyhaW33dJmEzhNuVm2tazKEBd+D+GyVYLHu0HTrTKi11id+nuR5k/ium + 3d0O9yimPYZ0V9wNHVzO9dOun9Gb3m5xfe8eZbLl7fSkTnl3f6xR7E7Oyy5iAe43V5phL9uyGHZh + XGvynTrrZZj42+FeeYeJcL+vdW+eaM/+k4AZb/WuuL9R+Xdxb5On//U33Bb35Hlf3ueVV+czcL+5 + IzNb9oOSmNItP18DkzrtdKFyuNXh973f8HbxTdwZ4Ey4pljNJM3y1IyxtEIVo5+i5bueKfSp+jgu + Hq0+n6wUw9Qar8XGoluPZPRmwK/VF+Nr4m9k6ViH6gmz3fxyi2vRZF4UaZXGi2nHy28B93+07fa9 + duTs2PdVzc1alrL7esP680+4k3V2PvsgZrr5xh/IfWMUI7+drM/8QnQygcUTwLy67kHWYEZdIPvv + g8IbMn1wZL5TsM6HRCt5byY6g0HfCe6Nt6veDR+n0t2GtuJf5R9hW1/T/FvA/XtqA+T0bnVwrZCq + kU+Zl97IrRTjToa93bSr51u451IcORrMqK+GXgj5QJwRYQS+GF5vDZl1AndSkgl3AtxkxZrX9QN3 + 
efHdwbFcQwJtLs4MfUMgE+79nSprlXLdg6Hljdcq8pbKVde/Bdy/3yvyHu19hbq2xZF9+Bud9dJ3 + WEO9N+4s/mRd9lUYEyYZNeNbYcPrXWNrzNgO3KMfu+CNo3IeM6FJLXvfZM/UDCar73gjxr2KxWfc + 9d2COzstSW4zljflRck5KkeW/S6/Bdz/yXG3H6xg1veCdEPXI/dfd6HdG/d+DsjXiERuDEk9jo57 + 9DHnYiwtNGvcqbmqjo1heMsa9xz1cV2I2WZuiq9wdzPua+suq0uzoHK14kpCpaFViH5eefwWcP8n + 4x4/WMGte1Ec+zLxltWNfZn7PGW6xJ0dEMOEi8O+4C7uc2xph7vhbkX1P/hvVmeGrAE7KUnPUhRn + nz34Ne75AHe2ImmpPD42NvK6Y3aO0uP31twV9xzef6DZXfu6hj/yPdvesKEqzlG5O+56kNa7wm1E + AtQqXGRtGfDga/ZGcSdfR/wUsu7ZF9d7Jb2L0qwk9yNwE7Z0NKlZIF05Uk18YqfHZ/m/rPDlTiG7 + bER3E/ZifK5CemGbv/wWcP8+bu/44WXj2be5X+ZGvYZmVKZwJ19m9Zgp9hLUnK2tzBsfP8FV6Oh5 + SIULuU2tEO70yUqFT+KuZztuVbmKQ+OsOB7zvqOV3fN2NdtK+6qW/+e9loGv9HamPN83S4i0Ssq5 + ptp9+dVvAfdv/Xh+z3E/9GUIzHIrX2bssN7Ll7k+iMCcy2vIj94BeY6eGVPLO3Y1bV17p+ybcFNf + RnzWejLck/dnYkR9J+D+syqbfpvW2S+3ssWm77CGcLbx7iad6rqkZ6H91LjnTTdk7eyHW7Ur7bzD + OJ0Md+j1cE9h47pru9bEm7Ur9eZhSggJuAP3k/gyZW3rY72dLda2gA13a6gCd+C+w3F23fvomXwz + 497mHVrgDtzvrTF8zGxs/e2Mex07vKFxN80Ad+D+/ZbkDKNb5u9RVSjORXvZn/EubSk6t5qCHVej + MU2yn7tnmBrnWSjG7nx+G1ePgOctiovzeqlY4A7cjxQ3rruZ5zzxI8gjs6zTu13awTbobG43HC3P + dwvdn/tEg1UmGmYzV8d1CcYPxLKpiG71o3FfaOAO3Lc4prXrLqy1cODXtNn6b4JyuMHXPGtqOySB + tLyzn6yCcal/cX8z2pdBfbC8WqUA9xfH3VRyM3az99Kx614XOx9Wrda0nvPUtgy2LYxu67rbdLC7 + a+5QWOCtK47XLYHtjces9+wu7knA/QVxL0e+STl03fPKiV85yu2Qtb4Ts/u+rX0lt+b0o1bwauZs + 394d2fbVnaKsKm27/BHg/nK4L4E37AFadWsl7Xbyatn54fvJUa4T6S6/HZ52Pqwn7zWetUxuza7Z + FCrl8YXJq3LWy5lbwP3VcDf5wlZfeAGrJ6xbO16mraV2W6JaJ9JeVobtfrKNa8/pE8Z9OxQ/bm8R + ddScuvbILn0Z4P5yuMdj13nXDTmPFuvMbCpIWm4O2wB4do4zsMQx6D+zqQGxg5nNJzx3rS52XbHS + rja1UQa3qpXtsk4D91fD3YZD477rhuzcZF09t3Tgl+h7m1dt3qi1oGhnSVk1NJenVr2elI+dmbj2 + vdc9LhtfiauM6buya+New+UNBLi/GO6DFFfbO92Q3eOpsjjOsKo1TqvK0ooru52YqUO6mRnrdjcV + Fw4eEW3P1OYetHbd076dbDbN2c50PGgNA/fXwr3f4S/iTe9GQ/aPYiHjtCM37sBzm53ksVFbN4jN + xhrPq37iPhTX7NdN02H+3vSX9Qjm/iEB9xfGvV55vrMbDVn7w5vFeu7JXdvcut5JZBvv9p5F2tI+ + 3J5P+DJtXf/aCv1iZ9yTlsGuDyId9f0A99fCvVzBzG0br4paqd0C27B/VlnWNnfj8FfbmT5wvWfa + 0/5x59UepLKujpsHtiugq1rxTQUp4eA3gPvr4V6uOg5uSz8HSG3HPrFZb2bXzjZ3TqZVDUqb/qC2 + 7P+DTvd0scHGmaLCtLkCRamJvYLEdY1twP3FcXfXHQe7aSVybPd6afvzHqm8rjO59GqxaQ2Y7YOt + Le3mHbdr+1jJrlrVbRTLMudx31epay31twD318S9Xm3A5p1lHbhsnsbbBamtv9EBc303dU3otpvT + 7QYeXHe71kMbR31b/JrhzVhZr27uUG3XqqjA/QVxt4cTRcuuI75uH0TV/dPRPF0sX/WsrwnfPuyU + b/iR/wo7e2XkzPqx6/pOYVdl1XUqh1Vs2ydhfbX5GXGGM/OiHZGXDVWzM+6zp70ZM+AWEx7XDnXe + 3CFm9yUv8KW5rz9aGXewGuqero2LjBe9kFKcdNAbTztcnhWb1eZuPj4L3F8RdwKkfGjcV8MhVx/t + vudx0zE+7QaepYuRiTrPm1/SpvqV6QPc59RoSez0+nnsKs9UKOuamLbOk9s3VXlDyc3kvh7hyR13 + oB5NV6mfn6jY9FlI20wfMO2gv+oyyU5fLXHm0HytRrtVFqj4Krjby3FZaT880Wz9graw7zQ32WaM + Y926Mm7f0qQr1Hmtbd9QbteHiWn44WbavOespbebwpZ5gsfGW1/V0MktDtxlbqZvBMs7zuhkjha7 + zwcn01DAPTrkUM4Xa3G59znU8oirylkU/ZUqFldZoMKr4E4XP14s2fXZ7fhfJgXRmrbsxqR0h3w/ + 8HzuB2Ta51Cru27Q9E7v+3qMb+8XZbat/u52VlMsaRRh09oYvahxusS952bio7R6v5EsTZMpEi01 + jRmBzSRaYGWhZE7iwNWG16wjrKrsYCRhGvMX5YUDrSa7rGpM0/1WjpvKocv4j6aH8qmOYKh27NmH + 8TtGg8enOeA7laWN0nq9ItHPVUy2GcmgTOUDNGZqrY6EOnpY69mWz4m72Y8jcRcDxup2SV11/Dnh + uLsomo+1TetRxZvpIU7td51Ha8XN5Kf67kCCZVBZ0rpjtVvJbnBveeM+5XV9zZ32ZeDlZW4mDr3u + s8aXllRIVDuJ5yKJl5Se7L2TrEqSOUkitkswePrUe0Y93yd66qWepYnMsPeazcmOX+C0BZmtr+G8 + TpIvhBGVX9BlmbZrfX9i6vt+K23ZNrgXLQsdvU/N694Vdw6lnWUb+htoHb4j0AqSLUFNf5DECZ5K + JsVsT4w7s7CKo2wOUhyUbcu1jw0QFzrNuLOldwN3dzHMsufp0PnU88jz/txfr5z7YCDBmF9lzJwY + re76hgTstbvueo12Y1Z23PzGPjdTpCVW05AZycaUOI9BjJJZUtFwRIPkYXIT/zxTWkcuguKVvMak + jSRMmqVJPpWak+RZMlNxinslhk2VPGRih73r6aEmqQbVeKG2+zSCKO/PTGrdG1cmBlSzOHFpXRzO + DFUB9tCs7Nhqqqkw542iF6rOZsZd0ktt8kU9Je7CrSt6Jy1H0+ji1tzHMTZAltWRbkayC+tjHrcb + 
ADbMfdEnsKuxBmPGRoz54+l7rURxU+rWUXLLkPa4In+kOZM61fTOYdz2Ny5zMwWC3mUGX7I0cQYC + uv1oaqagv9ZTbTjNnEQ1RPLn+cB+cpo9Z0nAwTk6JEuTplwippknNtz94bLk1HMcHSSzQdeUZ/LD + RtMzGU6ZwAUqHXcx3HO7IHlHJ4WHXGtZyFy3acGdlvFuguzBclo/rhAmcsaFnOfdhDlj4E+nSzjF + 1Oz+NDRfTMpbuzfm0quI3bhm6U5soz2a9fHSBt+8cuZX00G2k5rC59qJuwRqddQYt21zqP8UlXA7 + +/yHU7Pn3EyMe8yOnGfJ0kRuORFTqqRmSjvcJXMSIZa53JyNiTfb4C759aimpI47ASg1QvOoSu4y + zg9iuK5wLqjIC+SHCXczcA9cILXuThH1cdugFR8rSn32Pi5N1UnzgwTZg+GjrZIcU5Kj5amnLJlx + z+YlcF+FzTgYIWl2HTW78F9x2WhpTrp0MGs1rG4Ig8ttBIFPeY1mH1zDHe8ircfAz62JzYypI9wF + kiis0NW3nJzHS2qwUje4F82c5CT/pLrvTWa2iOsQ2sBdszTxS2Zng6w4d0fWnu6GnfukjhB5OHyD + 0fRQddL0TIL7nF0kS5YbckzSDndZ4h2XVv2UGffU8TdR/Cr6ec3Fc4S75pTK+dlxZ0dh5qEe4lUu + +kjiZTe8na202c0dWk+r3rQFytGU7o/6TrdGejXddtNLvfHM7MEKl7mZBF+i24ubQO07cuorZwf2 + /GG0/MgBnpbMSYooe/+9Q8+HsqReqmyrszoWjZ17TzZU9qlN1SKZyqiSCbB+pIcq9L4ELoah/dWe + XsFJgYokNO5NVfbP2YuRsqSxv7h0ezpJ5lq8eEVOGsrVaQbY4cxorig+tKKr+PrkuPMlt5wr+NqY + ArPvmLerhydu9F7ZOeF82XXvyGd1WdOmttRv0b69A41WbDpY1dnVh91lvMzNVGSEGVcKaoAmW0e+ + pNjLPnHiJe4hXDInsZPj7Cqxkg5RK5qEaWRp4qRPks3JOm4tuV7u0hvQTvoExQZYTg9l6UdyzwY1 + r057dvw7hV11nbQSHbcNCq0jZaFto642ae6oSZ6c6R64dWG177kVPpAy3MGqJS29mMal58f9uqqL + GzfD1GKvFTxVO1eCDZKpzAEay8bMtjjXkU/0I7lwMOeUG9nuIO+OaeuolfbCMb33IIK4fexkf9Z1 + 9u7uJD0E7re/kdTNdTW22k+dCBNvG4f43rhvR3+WH+YxR+D+SCpfdfPPjvvOCKSnv4LA/dNK+as9 + lg+G+wsIuH/NtLsYPp7X+mm5ivMK3M+oMfDgM9H1IOD+2KdpzA5JN02YAwH389r2NJ7hJpwR4P7s + fnub0/XhjAD3Jz5J82gYGHfg/hrGnTtR4LkD99fw3N3iwjecEeD+5LjH6ThmNQTcnxD3UKq75QNV + CLifVDGEW4+WgYD7WdWuzrSCgPvzyYJ24P5K9t2tQnRAwP3pT5S1CScLuEMQcIcg4A5BwB2CgDsE + AXcIAu4QBNwhCLhDwB2CgDsEAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CLhDEHCHIOAOQcAd + goA7BAF3CALuEATcIQi4QxBwhyDgDgF3CALuEATcIQi4QxBwhyDgDkF30P8CDAD81KaHNg4vYAAA + AABJRU5ErkJggg== + headers: + Accept-Ranges: + - bytes + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Disposition: + - inline; filename*=UTF-8''invoice.png; filename="invoice.png"; + Content-Length: + - '24463' + Content-Security-Policy: + - default-src 'none'; sandbox + Content-Type: + - image/png + Date: + - Tue, 29 Oct 2024 18:07:26 GMT + ETag: + - '"78f9c64a3ffcfc436d52d523f3e3c5ad26c14b95"' + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 b52390676bd7dc3663adaa0cf42ed602.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - ShIl6beONTCXPjDkk2V_FiR_kLlm8VdULBP54IyTn1WVfp2K8D6roA== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Repo-Commit: + - 2359223c1837a7587402bda0f2643382a6eefeab + X-Request-Id: + - Root=1-6721245e-266bc7fe5637241c1cd5fd40;dab98c1a-2513-47bc-b495-f14b7d0073c0 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": {"question": "What is the invoice number?", "image": 
"iVBORw0KGgoAAAANSUhEUgAAAu4AAAQlCAMAAADePLi1AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAA+5pVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTQ1IDc5LjE2MzQ5OSwgMjAxOC8wOC8xMy0xNjo0MDoyMiAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxM0YzN0MzOUJGRjExMUVBODJDREU2NTMzRjY0MTBGMSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxM0YzN0MzOEJGRjExMUVBODJDREU2NTMzRjY0MTBGMSIgeG1wOkNyZWF0b3JUb29sPSJJbnZvaWNlSG9tZS5jb20iPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0idXVpZDo4MGU0MmU2OC1lZjRmLTExZjUtMDAwMC0zNzM0YTAxMTJjNWEiIHN0UmVmOmRvY3VtZW50SUQ9InV1aWQ6ODBlNDJlNjgtZWY0Zi0xMWY1LTAwMDAtMzczNGEwMTEyYzVhIi8+IDxkYzpjcmVhdG9yPiA8cmRmOlNlcT4gPHJkZjpsaT5JbnZvaWNlSG9tZS5jb208L3JkZjpsaT4gPC9yZGY6U2VxPiA8L2RjOmNyZWF0b3I+IDxkYzp0aXRsZT4gPHJkZjpBbHQ+IDxyZGY6bGkgeG1sOmxhbmc9IngtZGVmYXVsdCI+VW50aXRsZWQ8L3JkZjpsaT4gPC9yZGY6QWx0PiA8L2RjOnRpdGxlPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PkiJ1HYAAAAwUExURebk5qKjp9FRRlFVaQAAACAtWGdoe+ShkLa1vTw/Stl1Y+/Kvnl5j42LlczMzP///yeDOCAAAFr7SURBVHja7J2JgqMoEIZhOAZ6Ad//bbcO8Irpa9Idk/z/7nQSowb1oywQqswEQS8jg1MAAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIeAOQcAdgoA7BAF3CALuEATcIQi4QxBwhyDgDkHAHYKAOwTcIQi4QxBwhyDgDkHAHYKAOwQBdwgC7hAE3CEIuEMQcIeAOwQBdwgC7hAE3CEIuEMQcIcg4A5BwB2CgDsEAXcIAu4QcIcg4A5BwL3LdukO0vho01ihL2r0tu2/m1YLlxK0Gp3L2cWyWdHYizVpYV/XbdddyrERrjP0T7ib0KUoufExuLFG1s+F3pbx5XoPsS9rA/ZlHyHkFaEp7NZ8Z90YDoV72NkxNKZ10dsHwn2G0i2420tg5+9z/1h3hDpzHffr6x7jnoH7mVFvaXdTTqmZR8HdXuLexnfpEvd+NyjXGb3A/Z11gfvDoW5tvfQ+adGPIH973OMl7qY7NqFe7iAe2uu1W7TH3b6zLnB/KNivNLVmK9/Oj3u+xH3+ulx65FID2iGl5RD3ue5sVIH7o6l9APvc03Fq3IfHssY97iz/ykbbD1uYO9zLMdHA/cFgt59UOznu9RL3su+2WRa19c5ojRjzbk873N16XbdrMqBn5jncmJ8C/gdwd5e420vu4mqJ3ZrhuN3TFve2vYvYbZNhbOqotT9r298PPZBp7z68OTHunek17gdd5+uOmbL72m3o325sdz1AdVM1Drwm6HSm3X5Z7cS4pwvczb6Xcn4KFY8g3fbTb3Hf+0VmU8mA+9OZ9oMH8ufCvVzgPn8/90S2cNBxU3dfasXZ4n4BtHv3W+hsSvZbuo1D8xO4u+u4l30/pF1/aw93fox72TcCEnB/TkfmaMjUmXBXv2KD+0XXjF1DCtxB++/w/iO42wvc6x73clAzgDto/2HefwT3eIH7RU9kXD8dAu6g/Xd4vy3uzi02fIP7xSAxd+DmA/cnl7X35v22uJe4gLnBfR7oYg/6If8NdwfcH6VPpv4z7ulcuNulR3GD+76vsW0+/hPuNhYROiJPrmZvoHQq3NtC2xb3HYfp0PP/Fu4bAffnddxv8Xz1trjH7qTkC9x3PZH1cNQXcIfj/rPu+61xj7MjvcXdbgfqls3IXOD+Ao77rXBPZ8K9zj65O56jYdZYbgdPAne4Mj/sztwa9zZzvMXdHI4fiz+Fe8hDq3nb0BMY938Lo3Jr3Me7Pe5bpE047La5Ie6Y2/F8vTI3MO83x3123q/gXtb9kBa4o536m63Vm+NuB4073Mvaq7aH01qBO4z7D5v3m+M+O+873DeDxOqWROAOz/13vPeb4z7eth3um57IchijA7ijW+aHzfvtce8o27jFsq2h3fZDAncY91/qe7897p3OuMN90xPpDgOO3RB3V+Is4P5cDdV/aazeHvc++NG5KyMX67Jx/THc8ZjpeRuq/+LN3B733VSPi3Hp5aIfErjDl/klb+YHcK/HuK96Iu1xbOzv4T4CgyPwxvl9mXoU7Ld+Z4k5De7pGHe79ETu+iExm+lV+mXSZRjUdLEsHcV7T7uw2O00uE/5EPfVILGym6cN3J/ada9rRg998HZhttt+HbNpAdQT4R4PcTcLte5KZCTg/tyue9p1wS8e+AiRmi42S0d3iW877z+Bez3EfTWp9FqnDXB/LdyNNr3MzHKa9kvs5ZJ/ebD6E7i393G30z6CHnB/jZbqDneJdc7N0DZYNuLG0BIz/JnLJf/UVv0J3DddkQvuo2ummn2mJuD+Ii3V/afx0ehNoA1Pv9r+3eU6J8S9HOI+fJyS9k/3gfsr4t5mfmtS023m76kpKu785Tr/1jXzI7jbQ9wHtrHuU3kA92fumLmGu9lmiDbij7e67143R+9Phbs5xL3NOWau4V73O09fwB1xZh4Y98bv08pdqfJkaWPRT4v72nlfPezPIbzfiI27+8Bn0hnMG+Op6uPh3jY98jPuI8nQFvdtr/2ZcC+HuLvD3JCr1UeGvLIZu7
vFvV7Jspon4P7YuKeB++jCSPUC93RK3NMh7rvB6OmiEdu3T++lItt9OQefjMAduN8J91Wq3xXuu4yo7cKrJ4bJY4u7bXd5zFa7rrXs7hWXgTcQe+PxnBlDOsT9pL77FI9wt1dz+7rjdKjtCPf43rrIq/rAPTNp6ZkR0JN0vz9AU3U1juAgL/xFSuHdN+E4fGonur23LrJmPw7u7aKTcQV0t/RrxKfz4t6OcDdHgL6DaTvE/ThLvAHuJ9X7j5nq7jHT1Af6dtwv1zkl7ot7sp515I77Ia+4M3Y6xv1o3TQB90fDXQcIVB4y0AcIrIhuy5LVOuccM7Pmrlyz4dsxbcZdo/0g5fb1dYH7+fT+ELHUU6bOA8LMnEQ1zcPI1uucCHd32Sy9ivt+xPJ2zp9r03Xcr6/r0FQ9ndLWnnelgawxq3vAaklfdLnOXca7j7AWiprGuXD14nu3NuLJxXfiYZjq5rAZa8+sHWxxbd26/oVZBcydAve2b8GaC4/HXDRxzYFX9Ou4/1QzPqXUzO3Xhe54TetV3K1ev/XEvHlJvVhyx8l7EPSNtuqi9aL9ROyPl/x2JAII+npb9e5hxIA79HvO+8OGVYKgb3gzjxo0D4Lu6M0Y4A6dtm/m4QNeQ9D9vJkG3KHXaaxOwB16GW+mAXfodcz7BNyhl/HeG3CHXsa8pwm4Qy9j3htwh16mtZom4A6dXvdvpwJ36NHcGQPcoZdxZ9oE3KHH6J2p/wp7TRNwh16G9wm4Qy/DuwHu0CPxfl/agTv0IPa93oB24A49iH1Pt0AVuEO/q3afPhngDj0O720C7tBDyqR7uO3AHXoMA99uV9O+2+Iolf4r5nh/jb+9HsevLqFTJ1tQ42Dgf7yN+o+4Zy/aNyCyhryO+m29srH36/cNVx/A/wrs/4J7aZcd/8lretPiXUtlTfVG64fBAbgD+F+C/V9w74bdqRXnF0umWnkvPnbDTcxnM1Uf5RtDN4XQJpepYjgfFHczeZflRhEP7hfQEwP/gQ9/+3Dm/+bM5Mn63MiKRx8b4UtLo+KeY8n0dfXZytc+Zu+oUpRKq9ESWqqrBj8Z752jlXmrq3cE6EmJv2LjfyZ0/z/gHgIBPBmTAvOceVZV8q47M15xDmStszeW3zPJxsy45+HMGMPvaZknQ4+W6wsSv0c+tZ9KU/GvzowRM9/ED8lTm3Ev7N0zzMHn0Cr7O1QrmnxW3OMB7rj2L8u8IepZxvykvftX3DN57Nm3ZuWtmXEXa16lIZoqN11noId1j4fWvZBrD/sOnQ53kYnekWFv5Mwkz+Aq75FhZqc9+cAdNJVMf6ZlVAOyUm97xZj8gjutRS1ecvDRXIXOhXsM7JeIZx5KsOLMWO6gEZfcSpd7JHemSlcMVYrANYCqhIvZSM9M75R3gSw8fZWzuPyZV0bXJHQu3L9YOa4+cYIg4A5Bj4t7q2h+Qi+DOwQBdwgC7hAE3CEIuEMQcIcg4A4BdwgC7hAE3IciD1xMOev4AIkukHIo8qk5HuaV3Ie77+P4HQZBQqfGXcbBNJ5vx4TLgF7Ln2Rgb+LhkVP9cGxjC7qGL7gU0IlxDzLgNwQetjv1Sao8+rd6I7gnqQONI9Iw+cborPJGS6yMoDHV8rZ6bxhDyGRlY+Q7DlcDow+dAfeUGyPKtrzxqPfGEWbYM9nibqgi8BTtKQcNRUB1hAfKJ5744YPJXs17x93JyslnnuLHE2DHJBAIurPvzohmzxaaTXFQMPv0vea9TP8gdI3UBwlakIhnmcdBL3xfyLZ1d0dx50pifOJ9JN+afMLIYeg0uDeZlbfgbjR4DIckqKVmhtlGntcqt4LMdwPy72kJh+QovF7bOjPR+SR1wKfio4sjYAEE3R/3ybiojovgPqKIrZwZckt48ilXiRgax1DKLjvy3i3P/TMb3Mm3yQvu1OrNOcJ7h06De4zsYk8d9zR3sCju1Yu3kzruRrx4btO2MkVu6OYt7rxyW1l3WlQsrhF0CtyLRNjQjshJowmQPc7aVNWOSPq/lMDh9OqkUTpoiasScKxwtww1V2Vr8olCtT6SA1QZd3bavbcZUQmgc+AufkYNQd3uUicTmXbBXR8ztWjIg8m22ikuj51oE44xEwM/kipBeC68YaRPvLKhzXjvJoeMoATQOXCHIOAOQcAdgoA7BAF3CALuEATcIQi4Q9CNcLfyKLUejlesmqnArZ//p838DR0aUIzVNWNfs8nuSpalNutcKCOj422OuFjQvXDvwyDDIe4yrMBtpjLFTSIaL4PiebiAPF8dIwWClz1LkHjHg97b2E/VUfMQdBfcnRDLY9dlgtKcUMdoXTA9X0HrGZwEd9OWysIbUq1wMhxeLX+SCSCF98sDZjibU55MkB/ybjKY4AfdzbrnkBX3zJQaMtA8MzWqCeaR7XnSlDa80OfiZ6zZjDuZ61c0U1O/RTjHYyAT3zXoW3ZhciDPhmcAyizADPMO3Q13ppX+8qDdLPM2HKcey91Qi1/CfkjmQcDVxDCtvBFveY6fL7qm2Xs8Tl+tOEyMep1vJRB0H9x1Ip7PttI7x0N6nQxrV2C7kWa772TG9tr3JtDZrJdpm9ij4x51Lz1bmeI+AXforriTRXeMuwzdTaHmkqufW6tm6hNNvVXcV1acQSfXJpdO8xZ3bQVrckpYd+gsuHMePTHRTSZpu2kJHKDxCLzMcOJEe0xy8GvcOVdfPcI9a7Uo40bBKzT+gKgE0L1wly5InjhNzkji7MDsuS9zj6R/hZNn8+zVrLiLf6/ISv9jFtzrDvdCzVqXm6HK4PJcH7Jv2eORGHQn3KM89clqpdnLSJl8jtndsLmbaIkXwzmFHT8/4r4Wce3ZcresUfeWIlRZKQeqNk1e4rxC9gHT+KB74f7dWgKHBHod3C3mnkKvgzsEAXcIAu4QBNwhCLhDEHCHIOAOAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIeAOQcAdgoA7BAF3CALuEATcIQi4QxBwh6Cz4/5AtcmYZzuiK6U2wP3LKi46zg9pQ51qkAQcNfSI7Mm5GKOzUwkh1wc5DzEE16joHK2YCs3/inOuyAmqckDOyBHZEx9Go/I3R6e+uTJFOiC6QJauTOBLZDlgMx9B4ti0dEBRr36MdKgGuL8jF1h2iiHKPz5roac+rUG/TH2dR5AcjyM4OPkfvSl0MJmXSbhtPdpAX7NOHHmbTjn/c/THUfktX5XUpNS1HxfLTKG/MgJhDkQO3K+I2G6uYxFD0UU9s16zBHxN9H1Z6sC5lRhiuvyNca9UZj4uKn/KcudKKQZnkzn9EXXcAzHsqKSVa2qLcjxOjouPx9G/TMdmrVz9RouDA+7v417o9B3jzmcwcMoNQqg+xom0bOmIjB3udoZbjsOEsx/RwD2Zfm24DvNx8G2LjsvwhSl0ofJyj6KaYMXBAe6vgrtYd9PUmXkC3IsRa+4mrr+Ku7uCuzpoL2Lev497FSSeBHcqq2a1fArcXXZN/Pc88esed9OvzWzdqz2LgyZJpMl31O6BOfdFi3X1QiuYX8c9EyHma
[... several kilobytes of base64-encoded image data elided for brevity; the full JSON request body is 32,688 bytes per the Content-Length header recorded below, and the tail of the payload continues on the next line ...]
Y1r057dvw7hV11nbQSHbcNCq0jZaFto642ae6oSZ6c6R64dWG177kVPpAy3MGqJS29mMal58f9uqqLGzfD1GKvFTxVO1eCDZKpzAEay8bMtjjXkU/0I7lwMOeUG9nuIO+OaeuolfbCMb33IIK4fexkf9Z19u7uJD0E7re/kdTNdTW22k+dCBNvG4f43rhvR3+WH+YxR+D+SCpfdfPPjvvOCKSnv4LA/dNK+as9lg+G+wsIuH/NtLsYPp7X+mm5ivMK3M+oMfDgM9H1IOD+2KdpzA5JN02YAwH389r2NJ7hJpwR4P7sfnub0/XhjAD3Jz5J82gYGHfg/hrGnTtR4LkD99fw3N3iwjecEeD+5LjH6ThmNQTcnxD3UKq75QNVCLifVDGEW4+WgYD7WdWuzrSCgPvzyYJ24P5K9t2tQnRAwP3pT5S1CScLuEMQcIcg4A5BwB2CgDsEAXcIAu4QBNwhCLhDwB2CgDsEAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CLhDEHCHIOAOQcAdgoA7BAF3CALuEATcIQi4QxBwhyDgDgF3CALuEATcIQi4QxBwhyDgDkF30P8CDAD81KaHNg4vYAAAAABJRU5ErkJggg=="}}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '32688' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 9443853c-617f-4251-ba33-8efa0c612e6f + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/impira/layoutlm-document-qa + response: + body: + string: '[{"score":0.4251735210418701,"answer":"us-001","start":16,"end":16}]' + headers: + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Date: + - Tue, 29 Oct 2024 18:07:27 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.521' + x-compute-type: + - cache + x-request-id: + - w7nLeyvTh1RRHebhaR2iH + x-sha: + - beed3c4d02d86017ebca5bd0fdf210046b907aa6 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_document_question_answering_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_document_question_answering_async.yaml new file mode 100644 index 00000000000..2c26ab1d405 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_document_question_answering_async.yaml @@ -0,0 +1,528 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + User-Agent: + - python-requests/2.32.3 + X-Amzn-Trace-Id: + - 5735577d-8655-4f8c-b49f-d052b5794191 + method: GET + uri: https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png + response: + body: + string: !!binary | + iVBORw0KGgoAAAANSUhEUgAAAu4AAAQlCAMAAADePLi1AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ + bWFnZVJlYWR5ccllPAAAA+5pVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdp + bj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6 + eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTQ1IDc5LjE2 + MzQ5OSwgMjAxOC8wOC8xMy0xNjo0MDoyMiAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJo + dHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlw + dGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEu + MC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVz + b3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5z + OmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1wTU06RG9jdW1lbnRJRD0i + eG1wLmRpZDoxM0YzN0MzOUJGRjExMUVBODJDREU2NTMzRjY0MTBGMSIgeG1wTU06SW5zdGFuY2VJ + RD0ieG1wLmlpZDoxM0YzN0MzOEJGRjExMUVBODJDREU2NTMzRjY0MTBGMSIgeG1wOkNyZWF0b3JU + b29sPSJJbnZvaWNlSG9tZS5jb20iPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJ + 
+      [... base64-encoded PNG data elided for brevity; the full invoice.png payload is 24,463 bytes per the Content-Length header recorded below, and the tail of the blob continues on the next line ...]
XsFJgYokNO5NVfbP2YuRsqSxv7h0ezpJ5lq8eEVOGsrVaQbY4cxorig+tKKr+PrkuPMlt5wr+NqY + ArPvmLerhydu9F7ZOeF82XXvyGd1WdOmttRv0b69A41WbDpY1dnVh91lvMzNVGSEGVcKaoAmW0e+ + pNjLPnHiJe4hXDInsZPj7Cqxkg5RK5qEaWRp4qRPks3JOm4tuV7u0hvQTvoExQZYTg9l6UdyzwY1 + r057dvw7hV11nbQSHbcNCq0jZaFto642ae6oSZ6c6R64dWG177kVPpAy3MGqJS29mMal58f9uqqL + GzfD1GKvFTxVO1eCDZKpzAEay8bMtjjXkU/0I7lwMOeUG9nuIO+OaeuolfbCMb33IIK4fexkf9Z1 + 9u7uJD0E7re/kdTNdTW22k+dCBNvG4f43rhvR3+WH+YxR+D+SCpfdfPPjvvOCKSnv4LA/dNK+as9 + lg+G+wsIuH/NtLsYPp7X+mm5ivMK3M+oMfDgM9H1IOD+2KdpzA5JN02YAwH389r2NJ7hJpwR4P7s + fnub0/XhjAD3Jz5J82gYGHfg/hrGnTtR4LkD99fw3N3iwjecEeD+5LjH6ThmNQTcnxD3UKq75QNV + CLifVDGEW4+WgYD7WdWuzrSCgPvzyYJ24P5K9t2tQnRAwP3pT5S1CScLuEMQcIcg4A5BwB2CgDsE + AXcIAu4QBNwhCLhDwB2CgDsEAXcIAu4QBNwhCLhDEHCHIOAOQcAdgoA7BAF3CLhDEHCHIOAOQcAd + goA7BAF3CALuEATcIQi4QxBwhyDgDgF3CALuEATcIQi4QxBwhyDgDkF30P8CDAD81KaHNg4vYAAA + AABJRU5ErkJggg== + headers: + Accept-Ranges: + - bytes + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Disposition: + - inline; filename*=UTF-8''invoice.png; filename="invoice.png"; + Content-Length: + - '24463' + Content-Security-Policy: + - default-src 'none'; sandbox + Content-Type: + - image/png + Date: + - Tue, 29 Oct 2024 18:08:47 GMT + ETag: + - '"78f9c64a3ffcfc436d52d523f3e3c5ad26c14b95"' + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 95a64b916cc7062be796d476a3fd8d74.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - XGgOYo6W4cjcEAfak_12dpHQNpq1mfyTVLgMfUDefxrJahUO9SgMxA== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Repo-Commit: + - 2359223c1837a7587402bda0f2643382a6eefeab + X-Request-Id: + - Root=1-672124af-5140bf501e58abb37b48eaa1;5735577d-8655-4f8c-b49f-d052b5794191 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/impira/layoutlm-document-qa + response: + body: + string: '[{"score":0.4251735210418701,"answer":"us-001","start":16,"end":16}]' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Date: + - Tue, 29 Oct 2024 18:08:49 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.521' + x-compute-type: + - cache + x-request-id: + - s4F_wxhPopCHEAMq7L0hh + x-sha: + - beed3c4d02d86017ebca5bd0fdf210046b907aa6 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_fill_mask.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_fill_mask.yaml new file mode 100644 index 00000000000..3c2bbe55354 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_fill_mask.yaml @@ -0,0 +1,789 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 04c3b21c-b31c-4778-9ad5-26de32ae516e + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: 
"{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One
+ of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A
+ powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image
+ model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A
+ powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A
+ powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A
+ text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An
+ application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A
+ powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A
+ gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An
+ application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An
+ application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image
+ is the task of generating images from input text. These pipelines can also
+ be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K
+ hours of multi-speaker English data.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker
+ English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I
+ love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The
+ Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated
+ speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A
+ powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A
+ massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust
+ TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A
+ prompt-based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An
+ application for generating highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An
+ application based on XTTS, a voice generation model that lets you clone voices into
+ different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application
+ that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An
+ application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech
+ (TTS) is the task of generating natural-sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for
+ multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft
+ Research Video to Text is a large-scale dataset for open domain video captioning.\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101
+ Human Actions dataset consists of 13,320 video clips from YouTube, with 101
+ classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality
+ dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A
+ dataset of video clips of humans performing pre-defined basic actions with
+ everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This
+ dataset consists of text-video pairs and contains noisy samples with irrelevant
+ video descriptions.\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A
+ dataset of short Flickr videos for the temporal localization of events with
+ descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth
+ Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception
+ Score uses an image classification model that predicts class labels and evaluates
+ how distinct and diverse the images are. A higher score indicates better video
+ generation.\",\"id\":\"is\"},{\"description\":\"Fr\xE9chet Inception Distance
+ uses an image classification model to obtain image embeddings. The metric
+ compares mean and standard deviation of the embeddings of real and generated
+ images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Fr\xE9chet
+ Video Distance uses a model that captures coherence for changes in frames
+ and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM
+ measures similarity between video frames and text using an image-text similarity
+ model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A
+ strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A
+ robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A
+ cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An
+ application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent
+ video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A
+ cutting-edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video
+ models can be used in any application that requires generating a consistent
+ sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score
+ ranges from 0 to 1, where 1 means the translation perfectly matches the reference
+ and 0 means no match at all.\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very
+ powerful model that can translate between many languages, especially
+ low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A
+ general-purpose Transformer that can be used to translate from English to
+ German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An
+ application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An
+ application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation
+ is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The
+ CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with
+ 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images
+ of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number
+ of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ inception score (IS) evaluates the quality of generated images. It measures
+ the diversity of the generated images (the model predictions are evenly distributed
+ across all possible labels) and their 'distinction' or 'sharpness' (the model
+ confidently predicts a single label for each image).\",\"id\":\"Inception
+ score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates
+ the quality of images created by a generative model by calculating the distance
+ between feature vectors for real and generated images.\",\"id\":\"Fr\xE9chet
+ Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image
+ generation model trained on the CIFAR-10 dataset. It synthesizes images of
+ the ten classes presented in the dataset using diffusion probabilistic models,
+ a class of latent variable models inspired by considerations from nonequilibrium
+ thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality
+ image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes
+ images of faces using diffusion probabilistic models, a class of latent variable
+ models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An
+ application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional
+ image generation is the task of generating images with no condition in any
+ context (like a prompt text or another image). 
Once trained, the model will
+ create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional
+ Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice
+ questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A
+ dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large
+ video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text
+ Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The
+ video shows a series of images showing a fountain with water jets and a variety
+ of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large
+ and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An
+ application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A
+ leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text
+ models take in a video and a text prompt and output text. These models are
+ also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A
+ widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A
+ dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What
+ is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures
+ how much a predicted answer differs from the ground truth based on the difference
+ in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A
+ visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A
+ visual question answering model trained for mathematical reasoning and chart
+ derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A
+ strong visual question answering model that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An
+ application that compares visual question answering models across different
+ tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that
+ can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An
+ application that can caption images and answer questions about a given 
image.
+ \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can
+ caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual
+ Question Answering is the task of answering open-ended questions based on
+ an image. These models output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A
+ widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The
+ Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced
+ collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER
+ is a publicly available dataset for fact extraction and verification against
+ textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text
+ Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate
+ Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful
+ zero-shot multilingual text classification model that can accomplish multiple
+ tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot
+ text classification is a task in natural language processing where a model
+ is trained on a set of labeled examples but is then able to classify new examples
+ from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot
+ Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat,
+ dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes
+ the number of times the correct label appears in the top K predicted labels.\",\"id\":\"top-K
+ accuracy\"}],\"models\":[{\"description\":\"Robust image classification model
+ trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong
+ zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small
+ yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong
+ image classification model for the biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An
+ application that leverages zero-shot image classification to find the best captions
+ to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot
+ object detection models receive an image as input, as well as a list of candidate
+ classes, and output the bounding boxes and labels where the objects have been
+ detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot
+ Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A
+ large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive
+ captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a
+ cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D
+ mesh model by OpenAI.\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative
+ 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D
+ demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D
+ demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D
+ models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A
+ large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A
+ dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast
+ image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast
+ image-to-3D mesh model by StabilityAI.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A
+ scaled-up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative
+ 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D
+ demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D
+ demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D
+ demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D
+ demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D
+ models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}"
+ headers:
+ Access-Control-Allow-Origin:
+ - https://huggingface.co
+ Access-Control-Expose-Headers:
+ - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '73726'
+ Content-Type:
+ - application/json; charset=utf-8
+ Date:
+ - Tue, 29 Oct 2024 18:22:39 GMT
+ ETag:
+ - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc"
+ Referrer-Policy:
+ - strict-origin-when-cross-origin
+ Vary:
+ - Origin
+ Via:
+ - 1.1 ed56247dd32d1f3f77d72a82c840f500.cloudfront.net (CloudFront)
+ X-Amz-Cf-Id:
+ - 
2YzB5fEdQz_kG2Bvl968wkhGKpSFsBOkNQPWNAukYDKGyld5zdOIAw==
+ X-Amz-Cf-Pop:
+ - CCU50-P1
+ X-Cache:
+ - Miss from cloudfront
+ X-Powered-By:
+ - huggingface-moon
+ X-Request-Id:
+ - Root=1-672127ef-71478eef0619b3fb1abad522;04c3b21c-b31c-4778-9ad5-26de32ae516e
+ cross-origin-opener-policy:
+ - same-origin
+ status:
+ code: 200
+ message: OK
+- request:
+ body: '{"inputs": "The goal of life is <mask>."}'
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate, br, zstd
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '41'
+ Content-Type:
+ - application/json
+ X-Amzn-Trace-Id:
+ - 6074b216-1ebc-4025-a61c-6422ccb2001a
+ user-agent:
+ - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1
+ method: POST
+ uri: https://api-inference.huggingface.co/models/distilroberta-base
+ response:
+ body:
+ string: '[{"score":0.06832392513751984,"token":45075,"token_str":" immortality","sequence":"The
+ goal of life is immortality."},{"score":0.06822273880243301,"token":11098,"token_str":"
+ happiness","sequence":"The goal of life is happiness."},{"score":0.032912302762269974,"token":14314,"token_str":"
+ yours","sequence":"The goal of life is yours."},{"score":0.025098653510212898,"token":25342,"token_str":"
+ simplicity","sequence":"The goal of life is simplicity."},{"score":0.024168511852622032,"token":22211,"token_str":"
+ liberation","sequence":"The goal of life is liberation."}]'
+ headers:
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '569'
+ Content-Type:
+ - application/json
+ Date:
+ - Tue, 29 Oct 2024 18:22:40 GMT
+ access-control-allow-credentials:
+ - 'true'
+ vary:
+ - Origin, Access-Control-Request-Method, Access-Control-Request-Headers
+ x-compute-time:
+ - '0.045'
+ x-compute-type:
+ - cache
+ x-request-id:
+ - sIAyUK3V1PQDN6HJ9Bpcz
+ x-sha:
+ - fb53ab8802853c8e4fbdbcd0529f21fc6f459b2b
+ status:
+ code: 200
+ message: OK
+version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_fill_mask_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_fill_mask_async.yaml new file mode 100644 index 00000000000..fd59754abf2 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_fill_mask_async.yaml @@ -0,0 +1,777 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 2f704cec-a633-4039-b0b7-28040e68feb5 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A
+ benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A
+ dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An
+ easy-to-use model 
for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An
+ emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A
+ language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An
+ application that can classify music into different genres.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio
+ classification is the task of assigning a label or class to a given audio.
+ It can be used for recognizing which command a user is giving or the emotion
+ of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio
+ Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element
+ X-vector embeddings of speakers from the CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The
+ Signal-to-Noise ratio is the relationship between the target signal level
+ and the background noise level. It is calculated as the logarithm of the target
+ signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The
+ Signal-to-Distortion ratio is the relationship between the target signal and
+ the sum of noise, interference, and artifact errors.\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A
+ solid model for audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A
+ speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A
+ model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An
+ application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An
+ application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio
+ is a family of tasks in which the input is an audio and the output is one
+ or multiple generated audios. 
Some example tasks are speech enhancement and
+ source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175
+ hours of multilingual audio-text data in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A
+ dataset with 44.6k hours of English speaker data and 6k hours of data from
+ speakers of other languages.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual
+ audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going
+ along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A
+ powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A
+ good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An
+ end-to-end model by MetaAI that performs ASR and Speech Translation.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful
+ speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A
+ powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest
+ speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A
+ high-quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic
+ Speech Recognition (ASR), also known as Speech to Text (STT), is the task
+ of transcribing a given audio to text. 
It has many applications, such as voice
+ user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic
+ Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU
+ Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular
+ depth estimation benchmark without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge
+ depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A
+ strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A
+ depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A
+ robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An
+ application that predicts the depth of an image and then reconstructs the 3D
+ model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An
+ application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An
+ application for cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A
+ human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth
+ estimation is the task of predicting the depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth
+ Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest
+ document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset
+ from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry
+ Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What
+ is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance
+ cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The
+ evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein
+ Similarity (ANLS). This metric is tolerant of character recognition errors
+ and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact
+ Match is a metric based on the strict character match of the predicted answer
+ and the right answer. For answers predicted correctly, the Exact Match will
+ be 1. 
Even if only one character is different, Exact Match will be 0.\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A
+ robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A
+ document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A
+ special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A
+ powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A
+ robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An
+ application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An
+ application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document
+ Question Answering (also known as Document Visual Question Answering) is the
+ task of answering questions on document images. Document question answering
+ models take a (document, question) pair as input and return an answer in natural
+ language. Models usually rely on multi-modal features, combining text, position
+ of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia
+ dataset containing cleaned articles of all languages. Can be used to train
+ `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India,
+ officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A
+ strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A
+ leaderboard to rank the best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature
+ extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature
+ Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A
+ common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A
+ large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The <mask>
+ barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification
+ models take an image as input and return a prediction about which class the
+ image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image
+ Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K
+ is an image classification dataset in which images are used to train image-feature-extraction
+ models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A
+ strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A
+ robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong
+ image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong
+ image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image
+ feature extraction is the task of extracting features learnt in a computer
+ vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image
+ Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene
+ segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average
+ Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for
+ each semantic class separately.\",\"id\":\"Average Precision\"},{\"description\":\"Mean
+ Average Precision (mAP) is the overall average of the AP values.\",\"id\":\"Mean
+ Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the
+ overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic
+ classes.\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1
+ is the Average Precision at an IoU threshold of \u03B1, for example,
+ AP50 and AP75.\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid
+ semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background
+ removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose
+ image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful
+ human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic
+ segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A
+ semantic segmentation application that can predict unseen instances out of
+ the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest
+ segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A
+ human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An
+ instance segmentation application to predict neuronal cell types from microscopy
+ images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An
+ application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A
+ panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image
+ Segmentation divides an image into segments where each pixel in the image
+ is mapped to an object. This task has multiple variants such as instance segmentation,
+ panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image
+ Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic
+ dataset for image relighting.\",\"id\":\"VIDIT\"},{\"description\":\"Multiple
+ images of celebrities, used for facial expression translation.\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak
+ Signal to Noise Ratio (PSNR) is an approximation of the human perception,
+ considering the ratio of the absolute intensity with respect to the variations.
+ Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural
+ Similarity Index (SSIM) is a perceptual metric which compares the luminance,
+ contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned
+ in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"A
+ cutting-edge vision language model.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small
+ yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong
+ image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision
+ language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful
+ vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An
+ image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An
+ application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An
+ application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text
+ models take in an image and a text prompt and output text. These models are
+ also called vision-language models, or VLMs. The difference from image-to-text
+ models is that these models take an additional text input, not restricting
+ the model to certain use cases like image captioning, and may also be trained
+ to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset
+ of 12M image-text pairs from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Dataset
+ of 3.3M captioned images from Google.\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed
+ description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A
+ powerful and accurate image-to-text model that can also localize concepts
+ in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A
+ strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A
+ powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An
+ application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A
+ robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An
+ application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An
+ application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An
application that can caption images and answer questions with a conversational
+ agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning
+ application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image-to-text
+ models generate text from a given image. Image captioning and optical
+ character recognition are among the most common applications of
+ image-to-text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A
+ dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong
+ keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An
+ application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An
+ application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint
+ detection is the task of identifying meaningful distinctive points or features
+ in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint
+ Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small
+ yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very
+ strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An
+ application that combines a mask generation model with a zero-shot object
+ detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An
+ application that compares the performance of a large and a small mask generation
+ model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based
+ on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An
+ application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask
+ generation is the task of generating masks that identify a specific object
+ or region of interest in a given image. 
Masks are often used in segmentation
+ tasks, where they provide a precise way to isolate the object of interest
+ for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask
+ Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely
+ used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task
+ computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It
+ is calculated for each class separately.\",\"id\":\"Average Precision\"},{\"description\":\"The
+ Mean Average Precision (mAP) metric is the overall average of the AP values.\",\"id\":\"Mean
+ Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average
+ Precision at an IoU threshold of \u03B1, for example, AP50 and AP75.\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid
+ object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time
+ and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast
+ and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An
+ application that contains various object detection models to try.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An
+ application that shows multiple cutting-edge techniques for object detection
+ and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object
+ tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very
+ fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object
+ Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the image with
+ bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object
+ Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark
+ dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing
+ Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong
+ Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong
+ Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An
+ application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An
+ application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video
+ classification is the task of assigning a label or class to an entire video.
+ Videos are expected to have only one class for each video. Video classification
+ models take a video as input and return a prediction about which class the
+ video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video
+ Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A
+ famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A
+ dataset of aggregated anonymized actual queries issued to the Google search
+ engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which
+ name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The
+ Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact
+ Match is a metric based on the strict character match of the predicted answer
+ and the right answer. For answers predicted correctly, the Exact Match will
+ be 1. Even if only one character is different, Exact Match will be 0.\",\"id\":\"exact-match\"},{\"description\":\" 
+ The F1-Score metric is useful if we value both false positives and false negatives
+ equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Tue, 29 Oct 2024 18:23:54 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 bd2ba1348c844640ee7787a39e84f752.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
VEYo4LtQCNXwutVYvjGkAd2NNXB_OMtgfL9BC0Z8v2xMe9lDSXYLpg== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-6721283a-5b6994280c128e0f2a70834c;2f704cec-a633-4039-b0b7-28040e68feb5 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/distilroberta-base + response: + body: + string: '[{"score":0.06832392513751984,"token":45075,"token_str":" immortality","sequence":"The + goal of life is immortality."},{"score":0.06822273880243301,"token":11098,"token_str":" + happiness","sequence":"The goal of life is happiness."},{"score":0.032912302762269974,"token":14314,"token_str":" + yours","sequence":"The goal of life is yours."},{"score":0.025098653510212898,"token":25342,"token_str":" + simplicity","sequence":"The goal of life is simplicity."},{"score":0.024168511852622032,"token":22211,"token_str":" + liberation","sequence":"The goal of life is liberation."}]' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '569' + Content-Type: + - application/json + Date: + - Tue, 29 Oct 2024 18:23:55 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.045' + x-compute-type: + - cache + x-request-id: + - rPdLaKpKt63qzBdvl_5FS + x-sha: + - fb53ab8802853c8e4fbdbcd0529f21fc6f459b2b + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_image_classification.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_image_classification.yaml new file mode 100644 index 00000000000..da6b0f48455 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_image_classification.yaml @@ -0,0 +1,1020 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 4067bfb0-673e-46d9-ab20-10f62a9c633c + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language 
identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstructs the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting the depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character recognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1.
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding boxes) and the image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank the best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + <mask> barked at
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is an image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks.
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a given \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images.
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS code.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers.
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + of 12M image-text pairs from Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + of 3.3M images from Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output text from a given image. Image captioning or optical + character recognition can be considered the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image.
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a given \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting-edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes.
Object detection models receive an image as input and output the image with + bounding boxes and labels on the detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Each video is expected to have only one class. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally.
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good the policy ultimately found by a given algorithm + is, considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps.
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + robust multilingual sentence similarity model.\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are to each other. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + is counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles.
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much-needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No. of + reigns\",\"Combined days\"],[\"1\",\"Lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employing TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the task of answering questions about information + in a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"A + text generation application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"A + text generation application to converse with the Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"A + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between the distributions of + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images.
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English data.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + powerful prompt-based TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generating highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application based on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural-sounding speech given text input.
TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Fr\xE9chet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Fr\xE9chet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting-edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating a consistent + sequence of images from text.
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 14:29:11 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 d54fda447b0b0eaf56389dbaa70fabb2.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
DoJy2waIh0VrfwzeIcDLVg6ujCuSV2gMZnjIHOfA38YTfXJl0KNijw== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67239437-1c2afb47708aeae60d71fde4;4067bfb0-673e-46d9-ab20-10f62a9c633c + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + User-Agent: + - python-requests/2.32.3 + X-Amzn-Trace-Id: + - f7b7d8b0-57c3-4ebf-8ae9-465bdec41035 + method: GET + uri: https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg + response: + body: + string: "\n\n\nWikimedia + Error\n\n\n
\n\"Wikimedia\"\n\n

Error

\n
\n\n

Our servers are currently under maintenance + or experiencing a technical issue

\n
\n
\n

If + you report this error to the Wikimedia System Administrators, please include + the details below.

Request from 202.142.110.32 + via cp5029 cp5029, Varnish XID 755780929
Upstream caches: cp5029 int
Error: + 403, Forbidden. Please comply with the User-Agent policy: https://meta.wikimedia.org/wiki/User-Agent_policy + at Thu, 31 Oct 2024 14:29:11 GMT

\n
\n\n" + headers: + access-control-allow-origin: + - '*' + access-control-expose-headers: + - Age, Date, Content-Length, Content-Range, X-Content-Duration, X-Cache + content-length: + - '2001' + content-type: + - text/html; charset=utf-8 + date: + - Thu, 31 Oct 2024 14:29:11 GMT + nel: + - '{ "report_to": "wm_nel", "max_age": 604800, "failure_fraction": 0.05, "success_fraction": + 0.0}' + report-to: + - '{ "group": "wm_nel", "max_age": 604800, "endpoints": [{ "url": "https://intake-logging.wikimedia.org/v1/events?stream=w3c.reportingapi.network_error&schema_uri=/w3c/reportingapi/network_error/1.0.0" + }] }' + server: + - Varnish + server-timing: + - cache;desc="int-front", host;desc="cp5029" + strict-transport-security: + - max-age=106384710; includeSubDomains; preload + timing-allow-origin: + - '*' + x-cache: + - cp5029 int + x-cache-status: + - int-front + x-client-ip: + - 202.142.110.32 + status: + code: 403 + message: 'Forbidden. Please comply with the User-Agent policy: https://meta.wikimedia.org/wiki/User-Agent_policy' +- request: + body: "\n\n\nWikimedia + Error\n\n\n
\n\"Wikimedia\"\n\n

Error

\n
\n\n

Our + servers are currently under maintenance or experiencing a technical issue

\n
\n
\n

If you report this error to the Wikimedia System Administrators, + please include the details below.

Request from + 202.142.110.32 via cp5029 cp5029, Varnish XID 755780929
Upstream caches: + cp5029 int
Error: 403, Forbidden. Please comply with the User-Agent policy: + https://meta.wikimedia.org/wiki/User-Agent_policy at Thu, 31 Oct 2024 14:29:11 + GMT

\n
\n\n" + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '2001' + X-Amzn-Trace-Id: + - 42d6bef7-a171-416f-9fef-15d6c8bb2e7d + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/google/vit-base-patch16-224 + response: + body: + string: '{"error":"Model google/vit-base-patch16-224 is currently loading","estimated_time":20.0}' + headers: + Connection: + - keep-alive + Content-Length: + - '88' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 14:29:12 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-request-id: + - V8Y2qEzE9mNEsDRn5xkfs + status: + code: 503 + message: Service Unavailable +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + User-Agent: + - python-requests/2.32.3 + X-Amzn-Trace-Id: + - a0ad5ea7-c5b7-434d-a0f5-99d446a210f7 + method: GET + uri: https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg + response: + body: + string: "\n\n\nWikimedia + Error\n\n\n
\n\"Wikimedia\"\n\n

Error

\n
\n\n

Our servers are currently under maintenance + or experiencing a technical issue

\n
\n
\n

If + you report this error to the Wikimedia System Administrators, please include + the details below.

Request from 202.142.110.32 + via cp5029 cp5029, Varnish XID 772508154
Upstream caches: cp5029 int
Error: + 403, Forbidden. Please comply with the User-Agent policy: https://meta.wikimedia.org/wiki/User-Agent_policy + at Thu, 31 Oct 2024 14:29:14 GMT

\n
\n\n" + headers: + access-control-allow-origin: + - '*' + access-control-expose-headers: + - Age, Date, Content-Length, Content-Range, X-Content-Duration, X-Cache + content-length: + - '2001' + content-type: + - text/html; charset=utf-8 + date: + - Thu, 31 Oct 2024 14:29:14 GMT + nel: + - '{ "report_to": "wm_nel", "max_age": 604800, "failure_fraction": 0.05, "success_fraction": + 0.0}' + report-to: + - '{ "group": "wm_nel", "max_age": 604800, "endpoints": [{ "url": "https://intake-logging.wikimedia.org/v1/events?stream=w3c.reportingapi.network_error&schema_uri=/w3c/reportingapi/network_error/1.0.0" + }] }' + server: + - Varnish + server-timing: + - cache;desc="int-front", host;desc="cp5029" + strict-transport-security: + - max-age=106384710; includeSubDomains; preload + timing-allow-origin: + - '*' + x-cache: + - cp5029 int + x-cache-status: + - int-front + x-client-ip: + - 202.142.110.32 + status: + code: 403 + message: 'Forbidden. Please comply with the User-Agent policy: https://meta.wikimedia.org/wiki/User-Agent_policy' +- request: + body: "\n\n\nWikimedia + Error\n\n\n
\n\"Wikimedia\"\n\n

Error

\n
\n\n

Our + servers are currently under maintenance or experiencing a technical issue

\n
\n
\n

If you report this error to the Wikimedia System Administrators, + please include the details below.

Request from + 202.142.110.32 via cp5029 cp5029, Varnish XID 772508154
Upstream caches: + cp5029 int
Error: 403, Forbidden. Please comply with the User-Agent policy: + https://meta.wikimedia.org/wiki/User-Agent_policy at Thu, 31 Oct 2024 14:29:14 + GMT

\n
\n\n" + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '2001' + X-Amzn-Trace-Id: + - c7251d5f-96af-4ec8-9984-810a43c01fe3 + X-wait-for-model: + - '1' + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/google/vit-base-patch16-224 + response: + body: + string: '{"error":["Error in `inputs`: Invalid image: b''\\n barked at me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language model.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + of 12M image-text pairs from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Dataset + of 3.3M images from Google.\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image-to-text + models output text from a given image. Image captioning or optical + character recognition can be considered the most common applications of + image-to-text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try out.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting-edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + robust multilingual sentence similarity model.\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are to each other. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + is counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much-needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No. of + reigns\",\"Combined days\"],[\"1\",\"Lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employing TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the task of answering a question about information + in a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure\",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on a set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict the weight of a fish based on a set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:12:59 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 d03f5e49ef8a75531152544d3c363680.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
ooy1fRJXbPd-hUBVpX6iVbzbstiwCap_HUIGjJJwwdivFha_7f6Pmg== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-6723825b-4cd7b0c81624bf566b542678;d21fca71-302a-4a16-ad77-f92d5ea0859b + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": {"question": "What''s my name?", "context": "My name is Clara + and I live in Berkeley."}}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '98' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 8c075962-578b-439a-be39-667b85cddae1 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/deepset/roberta-base-squad2 + response: + body: + string: '{"score":0.9326568841934204,"start":11,"end":16,"answer":"Clara"}' + headers: + Connection: + - keep-alive + Content-Length: + - '65' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:13:00 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.061' + x-compute-type: + - cache + x-request-id: + - ZNSBETLlopHC6gfAAj7yt + x-sha: + - adc3b06f79f797d1c575d5479d6f5efe54a9e3b4 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_question_answering_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_question_answering_async.yaml new file mode 100644 index 00000000000..b20419d9b1a --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_question_answering_async.yaml @@ -0,0 +1,772 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 6e943ac3-ec7b-4d82-a12a-29e94a30403c + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different 
genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:13:12 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 1f9395248c2468f012a231db8d5dfd1e.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
foF8_lN05-CT7fJHFapMNIM7IZs8FpTmJMQJvVMgVdoEvD4ETycCIQ== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67238268-7d3c5e5a5b1bde036f5e47f6;6e943ac3-ec7b-4d82-a12a-29e94a30403c + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/deepset/roberta-base-squad2 + response: + body: + string: '{"score":0.9326568841934204,"start":11,"end":16,"answer":"Clara"}' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '65' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:13:13 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.061' + x-compute-type: + - cache + x-request-id: + - SFnO3ow972RjsQCM2zw5n + x-sha: + - adc3b06f79f797d1c575d5479d6f5efe54a9e3b4 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_sentence_similarity.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_sentence_similarity.yaml new file mode 100644 index 00000000000..ac294a49031 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_sentence_similarity.yaml @@ -0,0 +1,786 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 979e728c-a1c5-4a29-9fa3-a5eb9016586d + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. 
+ It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:22:32 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 8e32769b8e512c5c22a29d1d82e994b6.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
3IHdpfjGYT9QVxBM6V0aJU4QQKQjsSP9LKB5AwHRe9pOaUyG3bLyKQ== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67238498-0ea9dbf466fca0bf543ef6a1;979e728c-a1c5-4a29-9fa3-a5eb9016586d + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": {"source_sentence": "Machine learning is so easy.", "sentences": + ["Deep learning is so straightforward.", "This is so difficult, like rocket + science.", "I can''t believe how much I struggled with this."]}}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '215' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - e1be2205-17a0-4edb-b696-15673c261786 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/pipeline/sentence-similarity/BAAI/bge-small-en-v1.5 + response: + body: + string: '[0.8412457704544067,0.5477299690246582,0.5041686296463013]' + headers: + Connection: + - keep-alive + Content-Length: + - '58' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:22:33 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.045' + x-compute-type: + - cache + x-request-id: + - Ta-_pCZftAD1N4v_ra6SZ + x-sha: + - 5c38ec7c405ec4b44b94cc5a9bb96e735b38267a + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_sentence_similarity_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_sentence_similarity_async.yaml new file mode 100644 index 00000000000..133b33af9cd --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_sentence_similarity_async.yaml @@ -0,0 +1,772 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 6ee30058-346d-4647-b262-ba04b944d3b9 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification 
model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful
+ when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This
+ model works well for sentences and paragraphs and can be used for clustering/grouping
+ and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A
+ robust multilingual sentence similarity model.\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An
+ application that leverages sentence similarity to answer questions from YouTube
+ videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An
+ application that retrieves relevant PubMed abstracts for a given online article
+ which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An
+ application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A
+ guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence
+ Similarity is the task of determining how similar two texts are. Sentence
+ similarity models convert input texts into vectors (embeddings) that capture
+ semantic information and calculate how close (similar) they are.
+ This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence
+ Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News
+ articles in five different languages along with their summaries. Widely used
+ for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English
+ conversations and their summaries. Useful for benchmarking conversational
+ agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The
+ tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey
+ building, and the tallest structure in Paris. Its base is square, measuring
+ 125 metres (410 ft) on each side. It was the first structure to reach a height
+ of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest
+ free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The
+ tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey
+ building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The
+ generated sequence is compared against its summary, and the overlap of tokens
+ is counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers
+ to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A
+ strong summarization model trained on English news articles. 
Excels at generating
+ factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A
+ summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An
+ application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A
+ much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An
+ application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An
+ application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization
+ is the task of producing a shorter version of a document while preserving
+ its important information. Some models can extract text from the original
+ input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The
+ WikiTableQuestions dataset is a large-scale dataset for the task of question
+ answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL
+ is a dataset of 80654 hand-annotated examples of questions and SQL queries
+ distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of
+ reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric
+ Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What
+ is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks
+ whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation
+ Accuracy\"}],\"models\":[{\"description\":\"A table question answering model
+ that is capable of neural SQL execution, i.e., employing TAPEX to execute a SQL
+ query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A
+ robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An
+ application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table
+ Question Answering (Table QA) is the task of answering a question about information
+ in a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table
+ Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A
+ comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood
+ Pressure \",\"Skin
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better
+ performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation
+ model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very
+ powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small
+ yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A
+ very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong
+ text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very
+ strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to compare different open-source text generation models based
+ on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A
+ leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"A
+ text generation application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"A
+ text generation application to converse with the Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A
+ leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"A
+ chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating
+ text is the task of generating new text given another text. These models can,
+ for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text
+ Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps
+ is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual
+ Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A
+ city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ Inception Score (IS) measure assesses diversity and meaningfulness. It uses
+ a generated image sample to predict its label. A higher score signifies more
+ diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet
+ Inception Distance (FID) calculates the distance between the distributions of
+ synthetic and real samples. A lower FID score indicates better similarity
+ between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision
+ assesses how the generated image aligns with the provided text description.
+ It uses the generated images as queries to retrieve relevant text descriptions.
+ The top 'r' relevant descriptions are selected and used to calculate R-precision
+ as r/R, where 'R' is the number of ground truth descriptions associated with
+ the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One
+ of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A
+ powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image
+ model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A
+ powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A
+ powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A
+ text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An
+ application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A
+ powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A
+ gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An
+ application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An
+ application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image
+ is the task of generating images from input text. These pipelines can also
+ be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K
+ hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker
+ English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I
+ love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The
+ Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated
+ speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A
+ powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A
+ massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust
+ TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A
+ prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An
+ application for generating highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An
+ application on XTTS, a voice generation model that lets you clone voices into
+ different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application
+ that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An
+ application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech
+ (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:22:44 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 1f9395248c2468f012a231db8d5dfd1e.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
eI9C3DjoGeBi8fcYOUVskwe5nnHaffRNRwswkkjialFdgcNag7XhSQ== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-672384a4-23bfd20f18e2c2653bc450e2;6ee30058-346d-4647-b262-ba04b944d3b9 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/pipeline/sentence-similarity/BAAI/bge-small-en-v1.5 + response: + body: + string: '[0.8412457704544067,0.5477299690246582,0.5041686296463013]' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '58' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:22:45 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.045' + x-compute-type: + - cache + x-request-id: + - OGPG8nvgorjDGMm12HF7S + x-sha: + - 5c38ec7c405ec4b44b94cc5a9bb96e735b38267a + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_summarization.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_summarization.yaml new file mode 100644 index 00000000000..6a12625a175 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_summarization.yaml @@ -0,0 +1,795 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 633ce332-181c-4ea9-b395-a44c73013939 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. 
+ It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text data in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of data from + speakers of other languages.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstructs the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application for cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting the depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character recognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank the best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The <mask> + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is an image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language model.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + of 12M image-text pairs from Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + of 3.3M images from Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output text from a given image. Image captioning and optical + character recognition can be considered the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model.\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + is counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much-needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"Lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employing TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the task of answering a question about the information + in a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"A + text generation application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"A + text generation application to converse with the Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"A + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between the distributions + of synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English data.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt-based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generating highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application based on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for
+ multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft
+ Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101
+ Human Actions dataset consists of 13,320 video clips from YouTube, with 101
+ classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality
+ dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A
+ dataset of video clips of humans performing pre-defined basic actions with
+ everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This
+ dataset consists of text-video pairs and contains noisy samples with irrelevant
+ video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A
+ dataset of short Flickr videos for the temporal localization of events with
+ descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth
+ Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception
+ Score uses an image classification model that predicts class labels and evaluates
+ how distinct and diverse the images are. A higher score indicates better video
+ generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance
+ uses an image classification model to obtain image embeddings. The metric
+ compares the mean and standard deviation of the embeddings of real and generated
+ images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet
+ Video Distance uses a model that captures coherence for changes in frames
+ and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM
+ measures similarity between video frames and text using an image-text similarity
+ model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A
+ strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A
+ robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A
+ cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An
+ application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent
+ video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A
+ cutting-edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video
+ models can be used in any application that requires generating a consistent
+ sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score
+ ranges from 0 to 1, where 1 means the translation perfectly matched the
+ reference and 0 means it did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very
+ powerful model that can translate between many languages, especially
+ low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A
+ general-purpose Transformer that can be used to translate from English to
+ German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An
+ application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An
+ application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation
+ is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The
+ CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with
+ 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images
+ of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number
+ of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ inception score (IS) evaluates the quality of generated images. It measures
+ the diversity of the generated images (the model predictions are evenly distributed
+ across all possible labels) and their 'distinction' or 'sharpness' (the model
+ confidently predicts a single label for each image).\",\"id\":\"Inception
+ score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates
+ the quality of images created by a generative model by calculating the distance
+ between feature vectors for real and generated images.\",\"id\":\"Fr\xE9chet
+ Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image
+ generation model trained on the CIFAR-10 dataset. It synthesizes images of
+ the ten classes presented in the dataset using diffusion probabilistic models,
+ a class of latent variable models inspired by considerations from nonequilibrium
+ thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality
+ image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes
+ images of faces using diffusion probabilistic models, a class of latent variable
+ models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An
+ application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional
+ image generation is the task of generating images with no condition in any
+ context (like a prompt text or another image). 
Once trained, the model will
+ create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional
+ Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice
+ questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A
+ dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large
+ video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text
+ Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The
+ video shows a series of images showing a fountain with water jets and a variety
+ of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large
+ and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An
+ application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A
+ leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text
+ models take in a video and a text prompt and output text. These models are
+ also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A
+ widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A
+ dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What
+ is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures
+ how much a predicted answer differs from the ground truth based on the difference
+ in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A
+ visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A
+ visual question answering model trained for mathematical reasoning and chart
+ derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A
+ strong visual question answering model that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An
+ application that compares visual question answering models across different
+ tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that
+ can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An
+ application that can caption images and answer questions about a given 
image.
+ \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can
+ caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual
+ Question Answering is the task of answering open-ended questions based on
+ an image. These models output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A
+ widely used dataset to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The
+ Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced
+ collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER
+ is a publicly available dataset for fact extraction and verification against
+ textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text
+ Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate
+ Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful
+ zero-shot multilingual text classification model that can accomplish multiple
+ tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot
+ text classification is a task in natural language processing where a model
+ is trained on a set of labeled examples but is then able to classify new examples
+ from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot
+ Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat,
+ dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes
+ the number of times the correct label appears in the top K predicted labels\",\"id\":\"top-K
+ accuracy\"}],\"models\":[{\"description\":\"Robust image classification model
+ trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong
+ zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small
+ yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong
+ image classification model for the biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An
+ application that leverages zero-shot image classification to find the best captions
+ to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:31:14 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 b52390676bd7dc3663adaa0cf42ed602.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
-rZ7srk4W5aUxqeHfnzBJIdQDPbUWu0EArq4AXh360a792UhElv3hA== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-672386a2-67c243890eec2f87078d65cb;633ce332-181c-4ea9-b395-a44c73013939 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": "Lorem Ipsum is simply dummy text of the printing and typesetting + industry. Lorem Ipsum has been the industry''s standard dummy text ever since + the 1500s, when an unknown printer took a galley of type and scrambled it to + make a type specimen book. It has survived not only five centuries, but also + the leap into electronic typesetting, remaining essentially unchanged. It was + popularised in the 1960s with the release of Letraset sheets containing Lorem + Ipsum passages, and more recently with desktop publishing software like Aldus + PageMaker including versions of Lorem Ipsum."}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '588' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 5ebb4a78-fd6e-4676-81c4-172dec5628e1 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/facebook/bart-large-cnn + response: + body: + string: '[{"summary_text":"Lorem Ipsum has been the industry''s standard dummy + text ever since the 1500s. It has survived not only five centuries, but also + the leap into electronic typesetting. It was popularised in the 1960s with + the release of Letraset sheets. More recently with desktop publishing software + like Aldus PageMaker."}]' + headers: + Connection: + - keep-alive + Content-Length: + - '324' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:31:15 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '5.009' + x-compute-type: + - cache + x-request-id: + - Na95BpqUsyFqCCGHqr1OI + x-sha: + - 37f520fa929c961707657b28798b30c003dd100b + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_table_question_answering.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_table_question_answering.yaml new file mode 100644 index 00000000000..61969fd83dd --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_table_question_answering.yaml @@ -0,0 +1,50 @@ +interactions: +- request: + body: '{"inputs": {"query": "How many stars does the transformers repository have?", + "table": {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": + ["36542", "4512", "3934"]}}}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '183' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 494961aa-d7a4-4553-b6d8-af63e63c1769 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/google/tapas-base-finetuned-wtq + response: + body: + string: '{"answer":"AVERAGE > 36542","coordinates":[[0,1]],"cells":["36542"],"aggregator":"AVERAGE"}' + headers: + Connection: + - keep-alive + Content-Length: + - '91' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:43:34 GMT + 
access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.041' + x-compute-type: + - cache + x-request-id: + - iL7q5CZtKfpfvV0WyQ6xZ + x-sha: + - e3dde1905dea877b0df1a5c057533e48327dee77 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_table_question_answering_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_table_question_answering_async.yaml new file mode 100644 index 00000000000..bd217ab89d0 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_table_question_answering_async.yaml @@ -0,0 +1,36 @@ +interactions: +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/google/tapas-base-finetuned-wtq + response: + body: + string: '{"answer":"AVERAGE > 36542","coordinates":[[0,1]],"cells":["36542"],"aggregator":"AVERAGE"}' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '91' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:43:49 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.041' + x-compute-type: + - cache + x-request-id: + - wzl87Bj6gay8-ILGAO2SP + x-sha: + - e3dde1905dea877b0df1a5c057533e48327dee77 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_text_classification.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_text_classification.yaml new file mode 100644 index 00000000000..993b8c08eaf --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_text_classification.yaml @@ -0,0 +1,790 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 387de72a-a9c8-4552-9bbc-d72ef88c83af + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification 
model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and
+ source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"A
+ multilingual audio-text dataset with 31,175 hours of audio in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A
+ dataset with 44.6k hours of English speech and 6k hours of speech in other
+ languages.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual
+ audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going
+ along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A
+ powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A
+ good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An
+ end-to-end model by MetaAI that performs ASR and Speech Translation.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful
+ speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A
+ powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest
+ speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A
+ high-quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic
+ Speech Recognition (ASR), also known as Speech to Text (STT), is the task
+ of transcribing a given audio recording to text. 
It has many applications, such as voice
+ user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic
+ Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU
+ Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular
+ depth estimation benchmark without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge
+ depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A
+ strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A
+ depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A
+ robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An
+ application that predicts the depth of an image and then reconstructs the 3D
+ model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An
+ application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An
+ application for cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A
+ human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth
+ estimation is the task of predicting the depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth
+ Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest
+ document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset
+ from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry
+ Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What
+ is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance
+ cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The
+ evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein
+ Similarity (ANLS). This metric is robust to character recognition errors
+ and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact
+ Match is a metric based on the strict character match of the predicted answer
+ and the right answer. For answers predicted correctly, the Exact Match will
+ be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A
+ robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A
+ document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A
+ special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A
+ powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A
+ robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An
+ application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An
+ application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document
+ Question Answering (also known as Document Visual Question Answering) is the
+ task of answering questions on document images. Document question answering
+ models take a (document, question) pair as input and return an answer in natural
+ language. Models usually rely on multi-modal features, combining text, position
+ of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia
+ dataset containing cleaned articles in all languages. Can be used to train
+ `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India,
+ officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A
+ strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A
+ leaderboard to rank the best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature
+ extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature
+ Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A
+ common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A
+ large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The <mask>
+ barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification
+ models take an image as input and return a prediction about which class the
+ image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image
+ Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K
+ is an image classification dataset in which images are used to train image-feature-extraction
+ models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A
+ strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A
+ robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong
+ image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong
+ image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image
+ feature extraction is the task of extracting features learnt in a computer
+ vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image
+ Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene
+ segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average
+ Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for
+ each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean
+ Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean
+ Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the
+ overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic
+ classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1
+ is the Average Precision at an IoU threshold of \u03B1, for example,
+ AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid
+ semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background
+ removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose
+ image segmentation model for high-resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful
+ human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic
+ segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A
+ semantic segmentation application that can predict unseen instances out of
+ the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest
+ segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A
+ human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An
+ instance segmentation application to predict neuronal cell types from microscopy
+ images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An
+ application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A
+ panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image
+ Segmentation divides an image into segments where each pixel in the image
+ is mapped to an object. This task has multiple variants such as instance segmentation,
+ panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image
+ Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic
+ dataset for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple
+ images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak
+ Signal to Noise Ratio (PSNR) is an approximation of human perception,
+ considering the ratio of the absolute intensity with respect to the variations.
+ Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural
+ Similarity Index (SSIM) is a perceptual metric which compares the luminance,
+ contrast and structure of two images. 
The values of SSIM range between -1
+ and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception
+ Score (IS) is an analysis of the labels predicted by an image classification
+ model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An
+ image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A
+ model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A
+ model that creates a set of variations of the input image in the style of
+ DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A
+ model that generates images based on segments in the input image and the text
+ prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A
+ model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image
+ enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style
+ transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An
+ application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image
+ generation application that takes an image control and a text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize
+ any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit
+ images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image
+ is the task of transforming an input image through a variety of possible manipulations
+ and enhancements, such as super-resolution, image inpainting, colorization,
+ and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions
+ composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation
+ turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A
+ collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots
+ of websites with their HTML/CSS code.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text
+ Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The
+ bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned
+ in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge
+ vision language model.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small
+ yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong
+ image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision
+ language models arena, where models are ranked by user votes.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful
+ vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An
+ image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An
+ application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An
+ application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text
+ models take in an image and a text prompt and output text. These models are
+ also called vision-language models, or VLMs. The difference from image-to-text
+ models is that these models take an additional text input, not restricting
+ the model to certain use cases like image captioning, and may also be trained
+ to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset
+ of 12M image-text pairs from Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset
+ of 3.3M images from Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed
+ description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A
+ powerful and accurate image-to-text model that can also localize concepts
+ in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A
+ strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A
+ powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An
+ application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A
+ robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An
+ application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An
+ application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An
application that can caption images and answer questions with a conversational
+ agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning
+ application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image-to-text
+ models output text from a given image. Image captioning or optical
+ character recognition can be considered the most common applications of
+ image-to-text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A
+ dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong
+ keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An
+ application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An
+ application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint
+ detection is the task of identifying meaningful distinctive points or features
+ in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint
+ Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small
+ yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very
+ strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An
+ application that combines a mask generation model with a zero-shot object
+ detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An
+ application that compares the performance of a large and a small mask generation
+ model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based
+ on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An
+ application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask
+ generation is the task of generating masks that identify a specific object
+ or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:49:59 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 0f8fd46039e719e265692b82b0d29f98.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
nVWhFsn506wPjVFEzAZN4Oa83fGf28ezhiG1FhFPCeLm5qJ-7z5SCA== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67238b07-546b0d2d4a8ada2722b6faa0;387de72a-a9c8-4552-9bbc-d72ef88c83af + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": "I like you"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '24' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 0242411e-e5aa-43a9-9bf0-668198b8f29d + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english + response: + body: + string: '[[{"label":"POSITIVE","score":0.9998695850372314},{"label":"NEGATIVE","score":0.00013043530634604394}]]' + headers: + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:50:00 GMT + Transfer-Encoding: + - chunked + access-control-allow-credentials: + - 'true' + access-control-expose-headers: + - x-compute-type, x-compute-time + server: + - uvicorn + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-characters: + - '10' + x-compute-time: + - '0.016' + x-compute-type: + - cpu + x-request-id: + - mIGvD664imOPpJrH6GWNx + x-sha: + - 714eb0fa89d2f80546fda750413ed43d93601a13 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_text_classification_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_text_classification_async.yaml new file mode 100644 index 00000000000..8c99581bceb --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_text_classification_async.yaml @@ -0,0 +1,772 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 0d5401a3-1cb7-41e8-92ee-1c9fbb42c279 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification 
model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will
+ create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional
+ Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice
+ questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A
+ dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large
+ video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text
+ Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The
+ video shows a series of images showing a fountain with water jets and a variety
+ of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large
+ and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An
+ application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A
+ leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text
+ models take in a video and a text prompt and output text. These models are
+ also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A
+ widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A
+ dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What
+ is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures
+ how much a predicted answer differs from the ground truth based on the difference
+ in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A
+ visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A
+ visual question answering model trained for mathematical reasoning and chart
+ derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A
+ strong visual question answering model that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An
+ application that compares visual question answering models across different
+ tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that
+ can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An
+ application that can caption images and answer questions about a given
image.
+ \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can
+ caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual
+ Question Answering is the task of answering open-ended questions based on
+ an image. These models output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A
+ widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The
+ Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced
+ collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER
+ is a publicly available dataset for fact extraction and verification against
+ textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text
+ Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate
+ Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful
+ zero-shot multilingual text classification model that can accomplish multiple
+ tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot
+ text classification is a task in natural language processing where a model
+ is trained on a set of labeled examples but is then able to classify new examples
+ from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot
+ Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat,
+ dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes
+ the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K
+ accuracy\"}],\"models\":[{\"description\":\"Robust image classification model
+ trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong
+ zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small
+ yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong
+ image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An
+ application that leverages zero-shot image classification to find best captions
+ to generate an image.
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:50:33 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 d3e5607910a603adbab92d44d38b797c.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
z2UEXsD_OXY2hlLeC68SGSBD62BiHCgHS4mtJ2rGjIkM7jHcwVRhww==
+ X-Amz-Cf-Pop:
+ - CCU50-P1
+ X-Cache:
+ - Miss from cloudfront
+ X-Powered-By:
+ - huggingface-moon
+ X-Request-Id:
+ - Root=1-67238b29-58fd85ff498c7196495eeb72;0d5401a3-1cb7-41e8-92ee-1c9fbb42c279
+ cross-origin-opener-policy:
+ - same-origin
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ user-agent:
+ - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1
+ method: POST
+ uri: https://api-inference.huggingface.co/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english
+ response:
+ body:
+ string: '[[{"label":"POSITIVE","score":0.9998695850372314},{"label":"NEGATIVE","score":0.00013043530634604394}]]'
+ headers:
+ Access-Control-Allow-Credentials:
+ - 'true'
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '103'
+ Content-Type:
+ - application/json
+ Date:
+ - Thu, 31 Oct 2024 13:50:34 GMT
+ Vary:
+ - Origin, Access-Control-Request-Method, Access-Control-Request-Headers
+ x-compute-time:
+ - '0.016'
+ x-compute-type:
+ - cache
+ x-request-id:
+ - c1GGyyLi14_tyIvEnTH39
+ x-sha:
+ - 714eb0fa89d2f80546fda750413ed43d93601a13
+ status:
+ code: 200
+ message: OK
+version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_token_classification.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_token_classification.yaml new file mode 100644 index 00000000000..eebfd128c9f --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_token_classification.yaml @@ -0,0 +1,785 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 7f98dc88-ecd7-4767-8d7c-6dfac377ae6b + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A
+ benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A
+ dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An
+ easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An
+ emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A
+ language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An
+ application that can classify music into different genres.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio
+ classification is the task of assigning a label or class to a given audio.
+ It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice
+ user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic
+ Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU
+ Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular
+ depth estimation benchmark without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge
+ depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A
+ strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A
+ depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A
+ robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An
+ application that predicts the depth of an image and then reconstructs the 3D
+ model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An
+ application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An
+ application for cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A
+ human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth
+ estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth
+ Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest
+ document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset
+ from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry
+ Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What
+ is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance
+ cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The
+ evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein
+ Similarity (ANLS). This metric is flexible to character recognition errors
+ and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact
+ Match is a metric based on the strict character match of the predicted answer
+ and the right answer. For answers predicted correctly, the Exact Match will
+ be 1.
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A
+ robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A
+ document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A
+ special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A
+ powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A
+ robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An
+ application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An
+ application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document
+ Question Answering (also known as Document Visual Question Answering) is the
+ task of answering questions on document images. Document question answering
+ models take a (document, question) pair as input and return an answer in natural
+ language. Models usually rely on multi-modal features, combining text, position
+ of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia
+ dataset containing cleaned articles of all languages. Can be used to train
+ `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India,
+ officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A
+ strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A
+ leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature
+ extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature
+ Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A
+ common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A
+ large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The
+ <mask> barked at
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification
+ models take an image as input and return a prediction about which class the
+ image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image
+ Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K
+ is an image classification dataset in which images are used to train image-feature-extraction
+ models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A
+ strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A
+ robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong
+ image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong
+ image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image
+ feature extraction is the task of extracting features learnt in a computer
+ vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image
+ Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene
+ segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average
+ Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for
+ each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean
+ Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean
+ Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the
+ overlap of segmentation masks.
Mean IoU is the average of the IoU of all semantic
+ classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1
+ is the Average Precision at the IoU threshold of a \u03B1 value, for example,
+ AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid
+ semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background
+ removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose
+ image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful
+ human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic
+ segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A
+ semantic segmentation application that can predict unseen instances out of
+ the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest
+ segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A
+ human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An
+ instance segmentation application to predict neuronal cell types from microscopy
+ images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An
+ application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A
+ panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image
+ Segmentation divides an image into segments where each pixel in the image
+ is mapped to an object. This task has multiple variants such as instance segmentation,
+ panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image
+ Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic
+ dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple
+ images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak
+ Signal to Noise Ratio (PSNR) is an approximation of the human perception,
+ considering the ratio of the absolute intensity with respect to the variations.
+ Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural
+ Similarity Index (SSIM) is a perceptual metric which compares the luminance,
+ contrast and structure of two images.
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned
+ in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge
+ vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small
+ yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong
+ image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision
+ language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful
+ vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An
+ image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An
+ application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An
+ application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text
+ models take in an image and text prompt and output text. These models are
+ also called vision-language models, or VLMs. The difference from image-to-text
+ models is that these models take an additional text input, not restricting
+ the model to certain use cases like image captioning, and may also be trained
+ to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset
+ from 12M image-text pairs of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset
+ from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed
+ description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A
+ powerful and accurate image-to-text model that can also localize concepts
+ in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A
+ strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A
+ powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An
+ application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A
+ robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An
+ application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An
+ application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence
+ against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A
+ robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small
+ yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A
+ special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An
+ application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question
+ Answering models can retrieve the answer to a question from a given text,
+ which is useful for searching for an answer in a document. Some question answering
+ models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question
+ Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A
+ curation of widely used datasets for Data Driven Deep Reinforcement Learning
+ (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red
+ traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop
+ the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow
+ light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated
+ reward across all time steps discounted by a factor that ranges between 0
+ and 1 and determines how much the agent optimizes for future relative to immediate
+ rewards. Measures how good the policy ultimately found by a given algorithm
+ is, considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average
+ return obtained after running the policy for a certain number of evaluation
+ episodes. As opposed to total reward, mean reward considers how much reward
+ a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures
+ how good a given algorithm is after a predefined time. Some algorithms may
+ be guaranteed to converge to optimal behavior across many time steps.
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:57:59 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 78844c00bd6cc9ba55086a896f473464.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
5UgiF_IXPI7u1oEwlp1kzgifAiGZs5e_1Hz-i4j5FtUgWcixZdnjuQ== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67238ce7-115facb60f11149e7f2f0f0d;7f98dc88-ecd7-4767-8d7c-6dfac377ae6b + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": "My name is Sarah Jessica Parker but you can call me Jessica"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '73' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - d36135a4-025d-4db3-9632-cb820959af3a + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/FacebookAI/xlm-roberta-large-finetuned-conll03-english + response: + body: + string: '[{"entity_group":"PER","score":0.9999532103538513,"word":"Sarah Jessica + Parker","start":11,"end":31},{"entity_group":"PER","score":0.9999523162841797,"word":"Jessica","start":52,"end":59}]' + headers: + Connection: + - keep-alive + Content-Length: + - '188' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:58:00 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.118' + x-compute-type: + - cache + x-request-id: + - L1CiImJrKPSRcqMOgNL3x + x-sha: + - 18f95e9924f3f452df09cc90945073906ef18f1e + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_token_classification_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_token_classification_async.yaml new file mode 100644 index 00000000000..3ae9079cd8f --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_token_classification_async.yaml @@ -0,0 +1,773 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - d8022c25-9547-4764-a1c7-e4dad24723ca + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification 
model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A
+ robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A
+ document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A
+ special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A
+ powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A
+ robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An
+ application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An
+ application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document
+ Question Answering (also known as Document Visual Question Answering) is the
+ task of answering questions on document images. Document question answering
+ models take a (document, question) pair as input and return an answer in natural
+ language. Models usually rely on multi-modal features, combining text, position
+ of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia
+ dataset containing cleaned articles of all languages. Can be used to train
+ `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India,
+ officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A
+ strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A
+ leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature
+ extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature
+ Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A
+ common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A
+ large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The
+ <mask> barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and a text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + of 12M image-text pairs from Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + of 3.3M images from Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An +
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output text from a given image. Image captioning or optical + character recognition can be considered the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image.
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of an \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting-edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes.
+ Object detection models receive an image as input and output the image with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Each video is expected to have only one class. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally.
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good the policy ultimately found by a given algorithm + is, considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps.
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + robust multilingual sentence similarity model.\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + is counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles.
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No. of + reigns\",\"Combined days\"],[\"1\",\"Lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employing TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the task of answering a question about the information + in a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on a set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict the weight of a fish based on a set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall.
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will
create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional
Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice
questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A
dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large
video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text
Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The
video shows a series of images showing a fountain with water jets and a variety
of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large
and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An
application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A
leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text
models take in a video and a text prompt and output text. These models are
also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A
widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A
dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What
is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures
how much a predicted answer differs from the ground truth based on the difference
in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A
visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A
visual question answering model trained for mathematical reasoning and chart
derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A
strong visual question answering model that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An
application that compares visual question answering models across different
tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that
can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An
application that can caption images and answer questions about a given 
image.
\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can
caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual
Question Answering is the task of answering open-ended questions based on
an image. These models output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual
Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A
widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The
Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced
collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER
is a publicly available dataset for fact extraction and verification against
textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text
Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate
Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful
zero-shot multilingual text classification model that can accomplish multiple
tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot
text classification is a task in natural language processing where a model
is trained on a set of labeled examples but is then able to classify new examples
from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot
Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat,
dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes
the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K
accuracy\"}],\"models\":[{\"description\":\"Robust image classification model
trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong
zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small
yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong
image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An
application that leverages zero-shot image classification to find the best captions
to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 13:57:47 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 57e244444474e188f70fa1e57528d574.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
u7De83WV8t2U-7tozaNWzRh79H4Jr-05aIpWiXlNAiKf2sbUHbUV2Q== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67238cdb-69983a946a10c04b648a978d;d8022c25-9547-4764-a1c7-e4dad24723ca + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/FacebookAI/xlm-roberta-large-finetuned-conll03-english + response: + body: + string: '[{"entity_group":"PER","score":0.9999532103538513,"word":"Sarah Jessica + Parker","start":11,"end":31},{"entity_group":"PER","score":0.9999523162841797,"word":"Jessica","start":52,"end":59}]' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '188' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 13:57:50 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.118' + x-compute-type: + - cache + x-request-id: + - RDjmZSROtmCXoxahUfWnj + x-sha: + - 18f95e9924f3f452df09cc90945073906ef18f1e + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_translation.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_translation.yaml new file mode 100644 index 00000000000..551219d4d80 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_translation.yaml @@ -0,0 +1,48 @@ +interactions: +- request: + body: '{"inputs": "My name is Wolfgang and I live in Berlin"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '54' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 37a2939e-3fa4-4a84-8575-497219ce20fe + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-fr + response: + body: + string: "[{\"translation_text\":\"Je m'appelle Wolfgang et je vis \xE0 Berlin.\"}]" + headers: + Connection: + - keep-alive + Content-Length: + - '67' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 14:12:14 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.237' + x-compute-type: + - cache + x-request-id: + - roM0t_ythCsh_GC2BTGjR + x-sha: + - dd7f6540a7a48a7f4db59e5c0b9c42c8eea67f18 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_visual_question_answering.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_visual_question_answering.yaml new file mode 100644 index 00000000000..374212db9ab --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_visual_question_answering.yaml @@ -0,0 +1,1958 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + User-Agent: + - python-requests/2.32.3 + X-Amzn-Trace-Id: + - dd55291e-a91f-45dc-9e5e-c2e98ef87a4a + method: GET + uri: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg + response: + body: + string: !!binary | + 
/9j/4AAQSkZJRgABAQEBLAEsAAD/4QB8RXhpZgAASUkqAAgAAAACAA4BAgBHAAAAJgAAAJiCAgAH + AAAAbQAAAAAAAABTaWJlcmlhbiB0aWdlciAoUGFudGhlcmEgdGlncmlzIGFsdGFpY2EpLCBhbHNv + IGtub3duIGFzIHRoZSBBbXVyIHRpZ2VyLndyYW5nZWz/4QV4aHR0cDovL25zLmFkb2JlLmNvbS94 + YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5 + ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIj4KCTxyZGY6UkRGIHhtbG5z + OnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CgkJPHJk + ZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRv + YmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczpJcHRjNHhtcENvcmU9Imh0dHA6Ly9pcHRjLm9y + Zy9zdGQvSXB0YzR4bXBDb3JlLzEuMC94bWxucy8iICAgeG1sbnM6R2V0dHlJbWFnZXNHSUZUPSJo + dHRwOi8veG1wLmdldHR5aW1hZ2VzLmNvbS9naWZ0LzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVy + bC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGx1cz0iaHR0cDovL25zLnVzZXBsdXMub3Jn + L2xkZi94bXAvMS4wLyIgIHhtbG5zOmlwdGNFeHQ9Imh0dHA6Ly9pcHRjLm9yZy9zdGQvSXB0YzR4 + bXBFeHQvMjAwOC0wMi0yOS8iIHhtbG5zOnhtcFJpZ2h0cz0iaHR0cDovL25zLmFkb2JlLmNvbS94 + YXAvMS4wL3JpZ2h0cy8iIGRjOlJpZ2h0cz0id3JhbmdlbCIgcGhvdG9zaG9wOkNyZWRpdD0iR2V0 + dHkgSW1hZ2VzL2lTdG9ja3Bob3RvIiBHZXR0eUltYWdlc0dJRlQ6QXNzZXRJRD0iNjI3NTQwMzg2 + IiB4bXBSaWdodHM6V2ViU3RhdGVtZW50PSJodHRwczovL3d3dy5pc3RvY2twaG90by5jb20vbGVn + YWwvbGljZW5zZS1hZ3JlZW1lbnQ/dXRtX21lZGl1bT1vcmdhbmljJmFtcDt1dG1fc291cmNlPWdv + b2dsZSZhbXA7dXRtX2NhbXBhaWduPWlwdGN1cmwiID4KPGRjOmNyZWF0b3I+PHJkZjpTZXE+PHJk + ZjpsaT53cmFuZ2VsPC9yZGY6bGk+PC9yZGY6U2VxPjwvZGM6Y3JlYXRvcj48ZGM6ZGVzY3JpcHRp + b24+PHJkZjpBbHQ+PHJkZjpsaSB4bWw6bGFuZz0ieC1kZWZhdWx0Ij5TaWJlcmlhbiB0aWdlciAo + UGFudGhlcmEgdGlncmlzIGFsdGFpY2EpLCBhbHNvIGtub3duIGFzIHRoZSBBbXVyIHRpZ2VyLjwv + cmRmOmxpPjwvcmRmOkFsdD48L2RjOmRlc2NyaXB0aW9uPgo8cGx1czpMaWNlbnNvcj48cmRmOlNl + cT48cmRmOmxpIHJkZjpwYXJzZVR5cGU9J1Jlc291cmNlJz48cGx1czpMaWNlbnNvclVSTD5odHRw + czovL3d3dy5pc3RvY2twaG90by5jb20vcGhvdG8vbGljZW5zZS1nbTYyNzU0MDM4Ni0/dXRtX21l + ZGl1bT1vcmdhbmljJmFtcDt1dG1fc291cmNlPWdvb2dsZSZhbXA7dXRtX2NhbXBhaWduPWlwdGN1 + cmw8L3BsdXM6TGljZW5zb3JVUkw+PC9yZGY6bGk+PC9yZGY6U2VxPjwvcGx1czpMaWNlbnNvcj4K + CQk8L3JkZjpEZXNjcmlwdGlvbj4KCTwvcmRmOlJERj4KPC94OnhtcG1ldGE+Cjw/eHBhY2tldCBl + bmQ9InciPz4K/+0AnlBob3Rvc2hvcCAzLjAAOEJJTQQEAAAAAACBHAJQAAd3cmFuZ2VsHAJ4AEdT + aWJlcmlhbiB0aWdlciAoUGFudGhlcmEgdGlncmlzIGFsdGFpY2EpLCBhbHNvIGtub3duIGFzIHRo + ZSBBbXVyIHRpZ2VyLhwCdAAHd3JhbmdlbBwCbgAYR2V0dHkgSW1hZ2VzL2lTdG9ja3Bob3RvAP/b + AEMACgcHCAcGCggICAsKCgsOGBAODQ0OHRUWERgjHyUkIh8iISYrNy8mKTQpISIwQTE0OTs+Pj4l + LkRJQzxINz0+O//bAEMBCgsLDg0OHBAQHDsoIig7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7 + Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7O//CABEIAZgCZAMBEQACEQEDEQH/xAAaAAADAQEBAQAAAAAA + AAAAAAACAwQBBQAG/8QAGQEBAQEBAQEAAAAAAAAAAAAAAAECAwQF/9oADAMBAAIQAxAAAAH5rncs + dFHOOxPZnoC1NS9Sd3EONH5lWWwFNk9SN6MLGX4lOYu3nddL1ehyzZzit2LrQ1SVKdHjzC32IzpZ + skq2TOumamXKoEZbsDityPyl+uB3K5N3TzRxH2Hxc/vqe1VDoRXyntzMgtsuU5Ttuw5/otfGdTyZ + CpPbvRuVXMeZjImSqsm6VG7iNkKG5rhdoaMk8mKYecnlorVTqvzKcwpUaqNt6a20c5HGW+aH0g1L + 0Yp4O1ouhdyuTT3Gl2IbqkzhM7TEyNlXqqhu3ob5kPpsuqOjYszn2Qkm9dC5VEN27Kfbu/Py3EXp + P7NEMyfluQWLswCBpeqvQkdD8nZK1Zt1uc4ZaannNGMjU+tYbT8wVyE9LnTR1uYmT3ONzA3FWjDM + U/RowJgETL49KjpehJmJlmYbx0FM6Zo6TKlmy5s5k9LP0MkMl3oTSqyjywsJ+t8dXxyLveN7r1uI + wrSzNgbMzMACjNJOmgp+ZTg6QFXpshU2BtFHZBSK8auU3MdmbsrpteteQEyQ+MyxNZlTrQdaUbML + ArwGTOkPndBT3C5mjo30xkywlu5FVm0ZUbygk1pNpyCVZVeaHHrDwXuy9hZVa16w4LMGig8xFJ1W + plJtAOChkHGUdmb07W3YsswMwFqqdiHC9UR8lNg9NJ1sLBk8HnSsZUxstnbSwYPz4T0Z0aesPUzl + r2Wbe89GA7PaVJlRZ0jpcL0ySK6TQqeZlNwt4T1nuh8Bue8+m84Xp0cFmHE+mpsTbHq6JNjYyAsN + WSavtK+26wFXy6T45qcgDac2rQWfZm6yW6Or63M23HW7eF6nJzlOMVddeyRRePId4HRtM1n1Dz0H + 
O51Z5ydvWaGMuk2glem+aTdg87D3vg48aeptdKY3LfJZ+qLrrv8ALI2jkEqNGzIUrZqgJo4FckAZ + TAFdqdLp0wcRlHHqXPNGqnZOdRaT2XRZkGsJuQV2eg9eYWXV6Tn5w1RzJN33OZie7QkHb1k/m0y3 + OqPFfYUngsX2Lmm1d0wmWfWlRPVaeqPOh0CtjqzLOcHrU4Dq9bhJ7RoVyG5ynVHUdAgmwq0a00aa + j1u69IbpaHF3PdfHrye3OnOoN46HLpyN56vPc3Xm3NXXT5b53bkrpi/U8tUkni4j1uJD22ck/Ob0 + m6ZrPqTw1q51SZtuciIzrDpdJPmgZi17wvVmxZtVelrOxFrQwVnUR0zDnS+dCutCgKZpuG5gUaPg + oULXLfagUcPkMyg6bzfTZasaoxrkWdaV/blw5Xce0+s5Yuzr7xZy6caXo2I3lOs1U2Zl8/LInso5 + 3wpYvRG7m2DoOdJzpdbVEzLNLxbtZCpFZlubTYO5PbPKrSiGXMrWmJtvcZ5Mu8qeZTpsjKYh5vo8 + mjMzFCgUKHpMjYbDLDscvK69u1y7KETUXTn93vnLqSc+nE4dwsYUx9D6fM3U+f4dYJutMsi68w48 + l8YzmX2mYr9UcPdIaFGaK6VG9SHU1n0TSzYt2pzs3MqtnooVoyoc1VtPNvTKlYnjTGggsL/NC7x0 + lUmi5RrNNhKtyZEwjrGalPNNb631j5Ksh12J3msCh6c+rrKZe1c/NeX1CErk+j9Xl5K9Q+Mxvr8u + lNkW+VeuPL4JsXLnLbqXDQcQaHVn5nehLu3JmTIyknKzrdOhY9IVyyOa9KeY1fdMrCPVkqxS15mH + X45dmO0WqVUgm26NyPEyh6FhcRbZ0IViXcQ9O/L7a6ONlKHo4OuelKedfJ8u1vPbFi3j6XtwdvHD + pfPpdy6c/eehjCNcSxnn8rJ2ZVWQZJ7xuW+bWbiw93esSXYWIWnPOPd9LOXyHtybqmyHOszWIVL1 + WMhb6TROdaU3PrOrwbDICPVuouULaOMZ0DiL3d0WZwPQupFr8mzpw/V0cS6nQ576XTnJVyUc+nzn + LrXnTlg1ntdeLO3JKozqLNSV510ufJLlBKnTctoY9ubRca+5kzp/SWayuhWfN1I91kVSJrm63iW6 + ks1JLYy6JdU0E2E2rWnD3SMk6nnhjaGFQ3UFTF5p5jUXoYGyuQudb2lHQc0F9PA6569zyVtxvdYp + skzotSLh3tzqmOVvNnbiclG5z87ugd84pv6vneM5T8+SskkmqvpdsflmbsMHl+s8u0NxOLmgr3M5 + WnK3qFskdQi5aGStAxMgSTWqcF6jQ07XnyA+EZrNQICWvQKEYZBIyyTF9wre8q2Zes++3G64+k1j + jTXXxrk8+jdTsWfF2X8erVFNOb25/ZM8Wa9nVvbj09Y5ud9nN43PmfPnz+d5utZqbkvvPavWzmXn + qfFf2maLTLPUvNOOvM5XNuoda9LfMRNLtpmWywb0TOiK2XUGnhydLz5cBwvgpGdU+a/Yz1qxaOSr + U8qORtnuWq/V2Z2vB1Ovcc5foZflM7u4d8l5esUzVUoJRLzd56OdM1lffh2N84K5mdfTZ0POfPef + j6JOldYEsXRN0tucDm+5kejWpdYKDAy6e5Wu5l1Q6WaXpZxzptWqySrm9LzfQqmefrbZGsnDLdjp + efC+NKPQ3IvRFYrIf1jyLBnQvFLWX5uGwvjro+/rV2z86vds+Ql+4Tn51Ny7TY3EjlqjRyzanod0 + 52duDtRZxJfqYozflvHjn4y7tDs5vTWZpZDZuaeIHW87vq1LLPTKc2fNeMijobXPUsZRjS91snkX + aqqcyPevajczQ1Lmu45DB0FTMk07rHYjLfZS07LclwGBdj7adWj19at55ddCT4lq0uD59F8e0VhR + ZK0eSamyl15S9+NCWlpWNzebxny/mw3rEaSar+SXevWGbzL6Jt6KrbllBmJ56GD0PEHdu2VXOlbg + +ZRu4i7U24MH5yKhbTiU887l7nR6R9ewZ0hcmDdMs9yo5MNwb6ZZ2V76M1sdOdY9FSieWfnufj3G + wEqlYFU+slnVHXlZ248VfqGZbZTr5sfGfLebDLJetUoYI7V9mx7k9QaorRqPsVpLKObUzmXs0kbu + s5TKR0s+p5VWz25YyGR5DDSrjDzV0XNuxc29TMwUPQoZBYe5MKOsZzdP27f06RVLqco+jJ4lK5rh + 8+nWzeH159bn1ZvmvO09OR8eze/ENY7pza40qpfoksxPk+PNOXs63lJu1l6Wuqs5htA9tmaR6lK3 + MEzopxM5N6Ojc8+am1a8mJzumijUl1Rr0rJPVZI7lNjNC5i0DNKvSM5zdjsbzpZm1RYdDh0PRvod + 9x28yzjW1J1QRcsOdU5vV68/mZu7j6J+3m79zxufSveH6zOfPS0V0JfoJK0+T45jzkuOpxHQOxy7 + TNZkUrRjy+zMjNUNPGWUZPROK/obIVk8vsF7rAKh1V2nkvS3MbyejaNBXIZBYbMlD+kOQuZelVK4 + jS/vet6+qFl1OOLO/HFp8qCs6ljLPmZr6m5hl5cu6lJRGHrOiNiHL5nllXOU4pWS6oWq3JVt1Krm + PO0wWCdVHRRiBp6XEPVyt5DgNHs5KOb42PVJ2BXl0qzk5sswdVucjijswZkOclge806mc6vm90jO + ajcr06Hq7t1OQX2UBywEtc0pOrZzzir9Okiwnak6VeNBFjY5eXzHFkz1uOV5cr0axQ0LJOjK6u8x + c7NaKq0xGYBqq0y0pK+ajDELUitNWYmarkEltzZ8glU6EjcwLU2hYUu4nmapEK7Wd5zJdxMhup0b + d9nW+yGrUqjypEnz1cte4ilcj6gEx3Tq2ETAAy1S8zO042MInPeWeb0ysfdLE7npZ7LOMXuDU26I + xMlGvRpVkWI4ezzN6VbdzhWe1ZtViO4M2X0X56CajrZ9AkXKFlGM24jek5c0EjMxgPONOp6Kvt0k + EFdnQFxNUcJXoWdxBPG1KfPSkvUuTBlQdHOsztedRmRtbDVARmszU0wekmucLEl5tsxSsltUrYWb + FGWFvLKulRtbk3ckxqHZw7lCsTvXdu0Zm2no8fWHomzBzH0zaKVsLkHEZrNN3T118gvcuLBRMTL0 + 0eNsMeLMJzkrgmJ1ss6R0Mbi4991lZFYUuo3OkZqa0szahxzd5ZqLvKPXnUKWBU3TIt5QNOpwzNs + rqOXM69ZRUCy6HBjgU+j31wYHYmVY5AXwwNCMNBGGEfPrbcydMcmzoJ0QDTDLJ1A0ckkqAVQKElx + 9NHL4+imCpAFk1MzV5vJ1mqWTWZN5ozfoefSjN5PXHek4O+atcMiBqa1kdHjCsb55z+2k9lPJ4C2 + jcTouBmikTu0p3r0InBGjBqYoC5MSq0VElGo9ZuPa9LuvLianPs7qVEIIFej1ZHqXDCSli5WH1CT + 
8+vP59PS12YTahkhzSdOlz21V6zBrI13OXRWst1mo4txLriI6ufiuyzpGZiLQV8gxq06g1gfMlR6 + Mj6nXQUYJWhGV6EqgUEeRyknlIyI+XboXNusH0x8wpWd5GkVIK7MFxi0IglWYml+rQs653LrJnS7 + aYbYInUxJT5/ebuXS6V8T7zLrLc66EvK64+pw5t1kzyNcqbgk5vPWSZsq0j2WJRuvRIrFpwCJugs + vre2yPGGKAgaJEJQSjFvQg1k49qGZt57OstuVV8udbU6IxELqaYrUmqaFLxJr7O5PNlxuXHTmzVW + aobY2xNLs8kcvD3jp43dKjWeP0x0Oe+1jXH68/ozmzV+aR8pvgLJWKlbJ7U9KmF1XqhEtruYK8Fm + N5zvejo+3xowJFLqEAs5FM0yjq2p7HRnPVW8e6Yls7CbXBWSzrJeGYCvkmqOHrws6+hsvRM1NLx+ + XSnn0Bcsm3no5JrResqlljidefVxvoxwOvN819Hz17pzh07EZy6T51PXz3TzixZRZu2IoRyRSjbm + TUG3auZHLta6vtSNMPCAStFqRiKGmrdBoym3Pzc12tYsVJ8gvfsrR4wBVEI5OHNdKO5qBHOWPG+j + jUnLqlV2Q7z0c00k0skn1JZXx891x1Ak4up9jm2axzq6lg8+kPPpz1Ynz+/NRYmVWTQanqhESlGU + ym1ovN+l3vxoYJgZppgkMw6FlSYcuajTu3L65J0rHnzU1bc3KxDrBUqyc5Uv0CX1w5efL9Qis6nx + 05/Lpz6HebcVtgi9LGU16X1z850x2Dgan1MXazNXRQM6lxviZ3iuY+c1yGKJMj2rlyUimjkVqmeU + oozPo+nTxgJpoIRDL6mRSnQr2NLKNZbrHz7XNj6a5trnHTsWvzJ9MmKyzIACuWSZ19Xc8OWSa+pu + WiZeby68vOuVt1eemy7Zli7LB2sosMo3z+dX1hHY1lSuSyVsfJc+yl+kxfj98JtNzMHs+EK+QdFy + io1iZH1t67Y0SuwVnjREpD0Wr8bnzoITXU3yq68+JLw1+1uH1KdCuBNYz2Fyw5csiOfL2iGEr9Cj + AM2bO48bRLudxLWg2T6zbEe50Lnrbx87NOS/eOAQr3rkzmr9NAHy2Oj+PWbU1nlb89GZE0u31eqv + OXSTqMrUHdyOzvdss6tQzx4IMwwMTw7LqSUTqb59DryDWeHNKT6ewQqE+Xzr6S5RLZU9nKlpOic+ + Xt2MzRln59JsbXm5KzUhXTyZvNAnUy56G8dW5+WmmWUXPzrXVudHL3E5APHtzM7QdiOLeXO1yXCd + 7CunnJZyiUZXJ7VboRdejjxhcaYeMBHGZsfHspUjEq1mvWen34Jrgy9RKqliiuPnVespluThKcva + udWjNTjaMbOyHn0CXZWjbk9TnzVCHvHtTo3MGg3Pa1lqfPNVpxaI07KUnHXrYvw80M32eXS1Pl9+ + ZCZrXqtziO32bsg2usGmWdudSG2imhGjhqKVMNmoePUJVjw0Fat46vo8/O1I46dLldYs50rC9PnZ + v6XWKc1edS43Pz6LUhOb4aEMs0SP1HbxPrPb1iegs4i/T3Inzi9FIBxVA1xJrpp8/wAu1HPYHRl5 + PTjDvHpAjDbdTYNPUGVMvU6bYVgngDxh48LFy5w7HKAY8mWpLN82+rz/AC8vTseaYKlXL1JV8es+ + Nrso1nJVzQS6DGGm2CGaV7z0uvLkWXWdGz1nz6rl+hZ481HK6mJlnCOosJdw6/M9M5Lfjd6R3ly9 + xkiawZLqYalMp8r2e+ngixggWr0iJpRpCdDnunl1wcMJ5X6nZ68W9eXxds6X2NXUCXujOXTm8e0i + km05FS6pHj0ZYoAaMq/WL+/B1c6jTpkR8hN3Zo5tFhWPCs5S1JWcWWvl1jzo66G+dWsyS8eF7zcM + UESJhKLj6O6AwA8aOOanH1LM69LNZXHW8/fF9Br6VydDtxt9HD5bO+aDqPimW+zpxzPN6Y1WlB4O + lx408OBFgADtS1Op6fOyznVy178lR89nXJzspVWOGJVSUWdesiPG+WFrN9OTwRMs8rgwwh1igomE + IuXAbOlLzrEGiZV007nn7hKuXB8rynWC9fllr5zOqVOFHUTOHeaUVXZfLqCZSk1SjbFQqsCA1OxZ + 0/R59lWDK0Oz5XOopSUzTQ0osgkvW61qc8Xm+sFTy1GasYYwULDiiopSsaNJzyACvUh5LUAo6vm7 + g0uV8ttzOP1n3o4fOayE1dKywI7Oann0VLqyZu1Ses1EWSakVgSnDlYEiq7VlW8FQwoOyYhJllEp + WOlw9Z43Km28GzixdKusxGQrpTBJ0uorIpWSgErEeTko4dFamKqdJDqefsrO2S6dPrywZrLemPgb + NXoRQtVx1pZefRKzc9qVhiPsXZL1516y+vnM6eX2NlUkiuhyZWEtnhoUYopMvrHGx4aYKlNAryvC + JD2MnddLjyi7bm3Rlyzw3NTTRpSYc4MtlYUpFU4K9Lh19nWygW9OdFlO+bumPiWppWDI61zbZPjp + Py67msrc2SwrIOvNW8dqq0gXkS3SC10rnnmCVwFCChhYKUE6G5LKnJBo1VWHKw8ANJzrkccmK+HM + YD1dCADJUrVQ8pBWVDNiwypqJAlq57bjbDZWazZ05s6c5tT5jG5xkNLNTqmcugZ0QwjzppPrPznb + k5Xx0rKTlL1IfZwJdsaWgSpFytBsIWKq2xyc/N0yq9ZVLPnVBoseGkCvI8lFkn0mpxreardZmh81 + gcr6WmGiilAXK9ICujq42UpgWHrLevPnnBzRUlFLk+gsnzp+dADmjovN5XTEVjCgWtCMNh1QFNbA + L6AEDZao9YnUWrkQFKKZVeoInFFXCzA0wAUrM3qah8MK7Xi2uorkc7EdKyxCavXskWOPIYJzqpj6 + SweXV+8AN3hNfOSzRspqFlMvZuabAl3OhpicuWTUOXyYEZWASssEOxctUricBJwaYUAwlXBgoKrs + LU3GhHizATTQRA+OwnN8+I/Tuzz4V0u9NgYnjwS9BCXkGmnpYdZplbc/TQuhsafOVys2gYuS7Y+E + V27lw0kqHKFq0plUiwQh9bKixgtPSjVAgXBKuzQooF06CWdDXADDx6zD0pig0BZjYEp5Zb5816yO + Env7YCnjVYTJ2F54J4ms2UUXZ0kcBby4mp8rYdUqVTXgETVAqwkTNVLZCDaJFQ9SsJVIkceFmhKM + mqZqCpJoBhVXOCjKplnRwkYqAAhxMYJzPadXw833I5sXv7aToYakeTVMxEqiymVIo1BoTwQY2XQE + aeVVjIWFaSEMlMw8nqnHGjRZZLFWQmx6xoARcvOQzTw1WpsKtenQljTnV9EvEmV22ywJtowoIyR1 + KxOzw5vxP//EACoQAAICAgICAgEFAQEBAQEAAAECAAMREgQhEBMiMUEFFCAjMkIzJDRE/9oACAEB + 
AAEFAvAECzSazWEQiMPGJiYiiCZmZmFvAHgQRnljZiDMRZ+GjTWYxGHy+0yPCnBezJ2i9zHaiHqE + 9/n4wzQxYfBHn8BptiZzGJ2f7hiuVgInXgPh4Zt83s+E467NMTWawCAfxMImsxMTWBZrD5x5xMTM + MIzEGIBCYWhg8MIrYCjwT14r+z4znxmZ8e47AzMD5hMEz41OcwGEBo481jLdToT/AECIh2RzgHqV + /JZSmtcLBfI/hn+BHgQQCYhEx2Oo0wfIEMMEzNp9wiEeBDGHSscZj+DEHX8CfHcb/YhG0xj+CiDE + cAjXs4EwDGqBmmIFxPqO+YCQNsyj7071BBq1YUhrJbdoGJY+czPk+SIZmBorzeE+R/DMzC38AYO4 + RCvn7gGPB8fnyYcTA8GAZEEP8FOPH3CcRiTN5mD6YhYWz/Clu5kzMq+7b9SuhgqyMeAJiGDw38GW + aTX+Xey1WZNXxZCszDAPJmYjfwPnHRPgDszHgx/OPBg8mbTaKw1Now7Qv0iEzEzLP9eMQSv/AFM+ + E+II7WsA9QCYgEIh8/kDMPUz/Efw46QkRvoNtHrxMQxFZ5oBD65rCjLNoD5A2KUgQBQpqV5ZT6ig + +Z+z5I8AZ8nznE2yfH3BHEFOTriY6x3f4X7Mr+/zkyjt8eXs62doFh8M0zBGMxBGPnaZ8AwmZg+q + +qa2yzVd9I4IM/bbT9piPTcSaWQM1an90sS7WAM6ni5n7awRai0GEjWYK2HKsTOSm1dCQjtl/hj+ + B+1j/GExV68HwTFadYaa6x1NjMmsr1YNWpmhmMQxJVkKxgIw/QXk6jMYzMMA8HwBGSazWEQLCsUT + EVMzHVag8U/GC5oW2CL7K9/UG5N911lwSocXk3ROJWgt0Fvqp5Fb8G1J+5aLzLUYMtksQ7KtcWyu + sFstd3xVJAI7b6I7li6wfXkxfu3vwPowmZij4soycRD3nI+jn5ue1bU5zD2DOp1KW2DDr6hbQe4T + aHwO/wCAEAh8GYn1GMUQDwe5h0hbaet2lVeI7hI1jMaeL7F5jV8RbW/ULJTTdWGodQvEOHXk8Y0c + pyb+GqxS1NwYWjRRPSQ1a932rHfZtumg0M0lgGB1MTHhh0v2x7RczBjeErGGth2JE7z+b2xO433K + /qH7CwrEZlZD7K7HBbXcftgZ34CQLiYms1mPH3CJibT7hWAwTPgdTuvloSYxwQMxf/r5Faai7k8e + o8q2yxQzKmbHPyUqLS3vWvi12VcivnUGptgIG2j9RSGPpYFTqS0z2xlZbGweaT6mIUmJrifbKgVc + SxYqZjfFfFC5axNZ9xhs/Us+8RRgQeMToQ2sibbLU0BBAWBYBNYYTNpmE9qIR0ZiAT8Y7ggMEKfN + mVVRTbZyg7Nx3Ti2vyL/ANpbQlS0VJx+OX6rGtGuRRcUPIVeTxuLxmUcuwKKG9dmjVz7AUo69DkV + 4cff1Af7MYRQFHvXGQ4D4jvNp911odiRAJ6yYtODyV1WAbFBrC/WQWYQkweFP8Mz8uMzUiDONYBB + 1PoZjGHzp32PGOsRhB9BcwrMRRFEtsVVFzXWpyNbOU/qqX9Pe3iH9NvddCZzgX4uDhegYO3/AEwk + cVVZKbv07nOP29nCbkOtSryfU/3DcuxhXBdu6WxfZ8WGTPyMiZzCMLjrRmGL1BpcyqqbATbM5f8A + nEpr1AHd4HqT7jrq2RMZgBWBsTEPkmLlpnrznMzB3NIRNewceD4EPgeNewuJ9DlPk/p6Eta23L5m + zvx77l4w5lt8q5pA5SualzFWGOcTghhU/P8AhV+qEWfqdzWrzWJoty3G4DtH46Bj3PZgu+kFk/cF + p7IbKoLKjP6pgRj3T/poX0UOGVjAZdZmVVdYljaD2tFMHYv8L9Rx35Y4g7n+VrPxg8KI+BBNvGIR + B4xAIQISIvhFhhAnMAz+mV/08VPbzeajTh8g8e0UWcJ7uTmvg8/Sr/qo9S37v5mP0+rk8bWmr3cm + zkX8m/8AUa//AJV4xt/Sv09sPkY5JOBUBLe4XCr/ANNkN9ytSW1OVX5K7LBaM5XDbEq3R7gJEVNr + QuARLTm0/YikpLyDE7iw9RiNfxGOB9xRqGMT68fnMY5g+mmYPrxnreDslMxhiCCA48M05aT9PA/a + fpw1uu2PHRxya0u5VEr5jFXq1t1AgzgFsskdT7GvrSi22ywPYqUWqR+kcHDfp1VSLcJbxv8A6Md2 + V4jfeMHXEPUq5Gj9Q9eAQB7pkYxMTERtWDhhvmW/F7UwasZcBqsLnXuKnsliFDtgCOfkuITmYif5 + mTB9ZzPqZhXr8wRoT0IDiIcxq8wL3jwANVrE/UDP0sH9r+mkfuwihbaTxOWdM7dMM2AZgEA7xmWp + h63yoZSy18W3l/qP/wCDi2Ov6TRzS71HM5C4DXvs9my5JlagCz+wtW00+Rby9ndJG+gzD4yixSuW + 6lnYX5DGtnQlnTwHqk/O9hsftuk/OIB4TuYg+gYhxPs4m3UxFQwgYWaeA8F3WYTEGWgwBzqtmQer + icO79tykvrdOYtlp9QV2OYBiLPwsx1YO6xiZFg4VBRud/dP0o5Xk8UUc2pdUKh15Faq+SJWAx5JA + 8Bjljn+Dqun1K7iYrw/ZhgT4pbib1mBEaWqcg5XoxVyxEU4L9tNhpj+CsFmheCfUWbd58AwDE2h7 + C9EmYyPC+KUzCPkoxOb8VqT+pkxy34F6T9PpsqG78jjvjDwQDqCXjUTPqPK5lts74nE/SkKUcij3 + OnQxOUqWBwAeP93nNhnxVGb+DMdcRAQ0BzD4+4kbuLY1R94jmp4yFTR/txiCHw0Bi9mZgGW2mYYD + PyHhMGIzQNN+lEMBMEx1Wk0ErAA1hnOH9fDf+nmcIcicK/I5tz1BuOmftmozApiiCfi8bKv/AJoo + MqSmipqn/Ub6axTVyOQtQXmBhTaLU5dauHGDWRXLO21hhgHXjbEPZidz4IejLeorYifbGBQ09eYE + VwOOkZGWNkzGP4qcHbJ+ongT8ZgHjMEHjGQHxN5nPjuJkDJzWcxs4+Us7At9VvHt9q8rhi2U7tby + bhVVxdiOMpZ1duOyNkbIIbOqHPJ5n2eE7Ohou5DBRUj8yWe6yJTYs4anPIB1bDl8wDCax1hnH1az + k16ND5B+TtkwnIMU6nIILfGpwk2qYW9MdwEbKum4zgjsmsY0MxjwTFbDQNM5n5BhImZnpTB4Az4H + UyIuMZEH3UwmJacBFN05NIVuJyErP7pg4VbHc8dLDxvfZSvL4913Gp5qPXZ+n2e8yvj288+n9nQO + HyLJxTx8E/tk3a9bOM4NdDrFrIalVAcjHIOrscH7jqRMkzUYStbC25gr/rMP1BNSYZ/z/mbTdhAS + 0+OuoMVRhLAI1azdljKtkVdZmNgBj/BT8ceRD9kz78ZmYpxM5gh7PQWAzj/6zmWruDgHkOual5d8 + 
r4iIL3FHIyBH4ykq36lK/wB0DatdyVcRm5i6olh5sWvmkqOOFSu4ROKDP2qiV8XR9QYuBORYK5dZ + hv8ASpW0sOB7EeNhoa2ncovFcapLRqVBEEd58jFyIYFm0G2DYcLnHcGSa9ozzeZJ8EZjKVmMwTHc + OYDPzG858gwQ+BM+KjPYNj/n412eoQeLyyz0sKjersvJ46sCjJf+oBZTyrqrq+Ul62XVAh6Jxylk + 1n+fHRmxBOCLiwlyMDX9Vt1Y3WcHaF2E7xiJaaz7EsVsGbYhPW5gOZmYaHxiBYoxCJWTh/tWzNcR + cE4hSdCLWSdNfGMwVzQRlmkCzHhfsrkQGFpnJPmnoKNb2fMwBBckH1nrMY5l9gIqUs/qLwcewn1E + hKNxyKGQ0LgU2gwMDNsFn1jW4CWbkSxhDsoX0Wx6zUVqFktrKOlZMXh2mWVNWdHM9DmHZCmXUoGn + r6ZSJ9BVLTUzM6nRiZEziKQZ1LP9Cen4/BSxnuxF1fwZiaY8CY6x2648EStRNlhpV0ZShgbyk4/b + 3A1P+5AJBYZ/boLiAOQrF+Vmcix8GhmTicMA6AAVhZiBe+VQtqamuAssHJxH5K4V8wbb1poSTLBv + A4ra6vaUcj2S2kiV27y+lFVUtaHimFLEimwRgcCqt5bxnrhDmaNjXMcawwyvuOkrJ2K2CVsdz0WU + MET5VjD2U4jfdvTVZwDiHYhU2BXMFRnqgrIArE0XGiY9KsGoE9GBWrCPSHH7ODigR+PP27Ra5XhX + vr9tbhqzxwFRUy7L0aGMeghFrBmsBxBbDYMFxGuABtzLGDRB/W+EmxBqcLK6u3/yIfjMJauXoZkW + 6Je9ZddpuFX96MjniDlVxbQZ1L6BOM4cW0awdx1zGHRjSr70zKqirE5DBYMs1mCFLKWPyW5sk/Nl + 72MBi9T9wIuAIOpsuGcYFsJ6yYWm3xU5BMzOsCfgDp+nrORyuMHFm3srs3f/AHHOAdWnSr/ztm1m + i2ZhbKs2AHLDEZ9ZhnDHC8Tiu5xqrZNobJxChzsLI1TVwWK4Yax1L2egRuPGBBqv9cq5KupbIfaq + 0H2JyEKOtqWRhHEMUYlLAzcFicR3yQ5U5zGaZ2gysKiGAFZ+XszAmQFn1LNmibYHc9UxgNkKtkuB + Iocsuxmcw5xuYLmgOJn5p9LOVVoaiQtbjVk2GrLBGbIRcTSaT8FDYorAFhClaZbZpOJxfawUKtj4 + X8Z+fhwDFOZdTpGtYCuzLbdRqEMbjgRH9bVvkXLsvBeWoQ1vEhcqAfcz8dq6sT6ijtmJ8r/iDvzr + NCITqx1cAsox8FEK5mrCesEKgWHoF4Zt1kzIsbSa6wUrn0jZ8RxiV/IARlDryKjTfQ+JW2VxmPVA + vQWYhWLXma4lmFRacy+/1njcY8i1K1VWn+o5wVB1R2K/fgriexkN/HDKwatkfMUz8WWaGzDpxnlg + ytZ1vb5VUt/ZfSm9lT8S2q92NlPvZ/gQYevBWDIh7iIcxnCzLmCzpdXhGh9TGA5mPGfBzN2i9i5S + 6K2Jka7B4thUjGMT6WBNlr/otHiypbFGaLw2IvcxGrmsCzSYwGMQe5uTaFqppblWIiov1CDZLG0m + d4sb4lD4DQ/1T/xnIrDgZpauwED6tUlFdq3VdWVdhbWan41i21hAvItXavUW0XUvxnS51Odzr2VL + T1CECGfUqPxss6PcB1JHYJWbd+2AwZE2DT6gBmJrmCuYhC2Qqs1s9hBaf5mPio2gU5Qd30e2viXZ + 882g54bbpW3Y8Y82NgduwxVWw9l3Fo9STBY33LSNXvZVwIw+PGPxjiVsLB/4s49ZtoDhksoai8OM + bS8Hbi25FQ6vq2XiH1XXp83HQJFdgW2q6tqXUxVjtpAUeMvaj5WLXNBGQ4XqZG0VC5HFE9GIJ0Ia + 1yAMdCKY74K5zjE3wVXMOUK2K03rnp+QFawanx9zmVtTZRaLEjrsoP7bl6kFD0PLHExsf8nl2F7O + MmZiYnK5a0Cin9wzrCMR+pmU/anw/wAG6sQdTul7KlcXUNU3F5AsHKp2Wj421jrHToPYyb0qNqru + q0P9FyjkJ/hhYollosgwILAJnMJJIbokzaf9KcwMVhsebmK+0zscQMIJ8jNTNdSTAcz6O+SKFDLR + AqwoDLuPgLznqau1bF+4av2zVWrYs/U6ZxbBYgGIDPxDMSw5WnscVcVTmc31Di8N7jjorHXpv8l8 + rxO0yPB+qiVLL7AU9oqUMLOPur1Wca1HFi3Kar+M21f4Ujf/AJ4465q61f8A8yHM/UeJlNDqIuAv + qhXWYGZ9nWfkQmAw4gTB1GMawYgdTM4m5zHd1PyYrW24GCuDHsnsMDidMOXxjlLXonH5S3L9xx6W + Vthcgevjs3HvzkfmNBDHbLBWscdDm84Vni8BnsC4EdlEturEflpjWx2pArXEH0ploxEYOhXYXVsJ + TyVaW0rauDx7OXV7av06zKsZX8jbZqOKMV8lQ1Fg1q4/ysexdbxo8wTACIW6GDHgXo+MfHYz8ZjE + qtdimMelwYRGU4VSC2TDsZqc4ySyh9sA9wsRGsLCr/P+hyKVcV3Px3pt9iMMir+o/Y/UKdk4V+zv + 1A+Yxh+84b/0sqqWuczkd8bhdjwRGpqlnHoaftahGQEFS0x0uDD8LdAVo/rsR9LMAjlcY1Ti8kGX + VLamRVYg/b8x/kq/5O5vqGF5Da1WfKitPSj3bl6menOrtjBOIfqK2GrAywE16+pqrAoAuiyrlLBh + 568EKM4x4Vvlh8qrCHORmYn3Pwe5jY1t8U7lvHFk5NXsnBsal1McZCPsrgNO+NyG7Ss9/l3wOTbr + XwkxL7fVVw6TFXE2AhtENsa6ezM3nUvSIcwDDWjqv5LyK2wye2vh37KVDDlcM1tw+XmcvjrarVsy + 8d967XGOJQABOezaUV549zZKaIa762nM45SB8IBDmZ7StcEQzbvHQGT9TM9C2T0aj+xZ7Q7DBmuY + RiDOubBBeNtxl2Op+3OfAn+RXZiDtWVXW+oVPW/WZX8Gz1y68kWjSs4Zjhb7PlXR7FrGqvT7j7MA + uYt2YzdM2Tk57n4WdPMaP9x1ynF+SlMrWCrtUarKrPYjDI5HEANDCyvkcVqrgpVnr9llY1G05Ngd + ukqvc+xELypQgzutiaOdZ1FrzYbESGwnxjMLEFLDguYGGNswAbYsWaMZ8Yr9ZbOZ9xpq0t/8swjw + OoczMr5JQ7BhzRtx6LcoH6Y/25hwwxhlIDnuOn9lSdYAj2CM2A7kzEz0TCuYIMYzMiWfSDZSOuM2 + LRHrwbE2Sljx71bYOgYWq1N2wupHYSK0stwvH3tv5t4WuivdndUPuEq5Cmc1P7VRY3rEBhWKFh+w + 2DZ9bgAD2MOPYYKVsiFgAwaLrBo4s4+xHHWesswDJCyrGv1NvMDID8fH34MrXIXZX5IxRx3xx0ty + 
u203gbMZxFB3AIBGI1pWC7LbwnImcTsgwZmZmbQHMU7CvoAZF9TU2izpTsCJy68jg8mFlVbeQLIv + xO2SpjHEtta2xCOPXoeZZbihDvZFRyUoaIuZdxbKpjJXEJ+Xj4wgEa4nFwLf3BhCmYgwkbx8lPuw + ayXFnL3gMZvHG+SzEX6xmN98d9ZqGjj+uyv1gW4CPNutp9tTTgsQse4zZTBgeB9TOQp6P34zPuKT + F+Lr9o0YBh/iJyFUhgRyKRYGD1XW3G0IwWexILBA6tCJXx9HNCWtqal5Nv7qLSKqmEqDOdAYDgWU + V2S7jsDXwWJXhUCftePDwuPDwKo3Bsn7fkVMa9jtqCUcl1A909uIGDHTu20Fc9j6b6zOK+t+OzFO + Bt0RmBe6ycbfFj63srraHKxYgyUowSwVXszDNWJxhQe8mfmD/WCZriYmMQ4VoTM9rDLTAP7Kn6yD + Ocf7T9kwGB8TeV2YPJOIpUOqrNAJYdJ/exzrKymucQZi4MavSLYpAsE2E2EB+PUasWEsojjaerE1 + eEmKWR/YGXUTRYePbqcwDJH+vycTM+USBorYLclabubxt1AzPWDBWohyh4+z1vMkzXvOCOx/1jvW + Zme/ZqPbPZ3vmbxT2Tm2v7guQl1R56qZhR45u/vVxAwb+AaXt/VYrYpvM3zNsreuq5DDRNRACsNu + k/cIsOLG169Yx67Y3tUBiEVlZTyGMzZNLSBQTDhYLCDrmLWSa1CjMt+S6KR/m0z6n3FgToDEAzLe + OttZ5dtUDZK9gzBYioV1ssVOvxr2mYIc5I607J7d222YzYwDMxOhKvlajTp1XVF3WDkIZ7hhuYJd + d7RoI6GbMIj7eMGEn1gZrK4CcjtLAxbIiD5KO2HW2fGqZ2Eb7NpDV25jPGAEH0C4tUjRDsykCP8A + dhBi5xVguQsCwnUFcpZ/afwYB0sQ7IqTUCKOrEOyyp8Q4M4o+bHIC5jOuCczcTHSMRFIlx0UOzxK + o1C2IwNbVLVYz1+qJ3LOLuDS6Cu1Fn7sRuUJ+4ILWs07MCCaloqaqV2jJ2EGT2dTsEUrgKM4hHVZ + trY3ElQTNZ6nYWJZU68l6ytzFug12zW3BmXYrN4WE2UTUPPxTPwXKG3JsHbV4wBmAsFPUYlq+sA4 + jiL9D6pYQ/5A8XdWsBEXI7E41biV7s5Y3ErswWJTg2fWfHLtyKrZXaDMzlV7BM1W3XPZPbiVXowF + +s+DAitpoDNRFYZgIEOGhxPizEqqWKjzKqn2MTXB/NIFzqpLFestn1OUVtHL7HkULAwD/uUlhyw7 + Ur8sLNacbETIx/qDqIbDDj2MmWVMRIGAJb5PAjCORKztTYO1X46zHdfYDTkX+tG+Td7AkPsJx2s2 + 1ayHM/P5P23cCKkvYBT9hIG1iWQkEFMSk1vLOLS0KkTAi15OOiuB/mBa2IqWCmf4GGMGymoMWJxF + KidAouW0RQ3xh/0uALSHn2lb4j1holnz3ZpYmLGQmceh7rUu+PLqCWGBRmzIZvtPv8jIX68fEtFY + PCIxyN2yxAThHakqSijx9T5RbFsP6lpZXnB2y7/X3KL/AFMGBRD7YVxMaRmzKRLv9XHdtDBB8pjS + C3pHM2NZt5B9WVWfAT7hafLGWEY6OlqNB9N1CwYKcAPgNdlh9MIhCz7n5J2BbM2yo+sSuzBYj2vt + HLaJlpwSvrcnflvtxrCTFO021hX5DGfzkEE+A+K2UWpWcz2Hbfot1Y2z0E0WK6vXgh4zCsDuPUtw + 5ICkZwemzlU+lUGVMPSh79vZUmBMtbbqt9nrrVMjQqAo2HcGIOierWaDLoVM2+PdkVCJqYRmbATU + PCoUqpygh7LK0rTE1GQVEwGUexQLTn6jTszOPDd+cvCZxrvXahU0sBarKc01am0MHCDGezjC9QZM + xKtGVBqfkFAn2LDkYn447lbFOwd9YWzE10v5lgc5aKDPvwBgrZ6iORZvxrF1Vkj2KqC0ojcnSMS9 + tSgm1opEJ6z0wIOheFVsRUfOxnoBLdEO2RZZg7GNttqcYMDYgMz3giVh46HJRhCra9iE5n3Am5Qb + QrB5UwtlPlsRNdojKC4BAPxyYfH3F7nY8KxNpuUQwDoiEiawdBQZRy9V/f17X8pGjcgvAO/qCHqD + ZTjIfuKFJC9jkaG3kmwXXm0BclABZsizO0EIzBMGbkQtmMTNoGxO4yFZ9TM9hQ7dhmA2Qz6gt+bM + QRyJ7iDVraWuX3C5QEL2F6TWuU3X2h7LdLjbvN58vWALC2EU/KLloSVnbxCGXTIw0Hhu4Dhw2xEF + Yg/0W2A+ZcEPjIx0QYQVIBwa4UCTE1M+UCgwLtNfljxrCuAEnrE0AOoEA6xibTIj/THpDsTkWfUb + Jj1q6/5b6hOWEttbPqwtRIgVrDYpyE6rUZUMqqjKaqzfdf8A0ni0m2zlV1reliVK/L2ldb2Coftu + PYd7cylVRX2LaPRCwhYSpsS85iYRfaa7KrksOs//xAAnEQACAQQCAgICAwEBAAAAAAAAARECECAw + MUASIUFQA1ETYGEiMv/aAAgBAwEBPwHtLOB6Xrm7utEZP+kPvPrPsv6hdhr6iBJHimVUtb/Bni+p + H1NLK6Y93VLfB/GeC/Z4saeKpk9Ukkj99WSbrN92l/DH+I8aUeX6IZ6/Z5KzR40ng/gp/G2P16W9 + YO6xV0PJ9acqbc+kf+T2xUjiRpVHi1wTPJxZ4/OTs7LZGp2RBHUpqulaRtJHkKpvgpVR4s8B+VJ5 + FLVVmRd+ugtjyYuuvYrtyNyQU1QSe3ycDcD9s4FVdkjeme1HXotBVPCH+rJXS9EEShD5EK0FeuM5 + +ogppgkn2VOEMm1FM41UxaSlkwSVrFi2qzeK6z1U0tjKR8lXv0NEEFD+BYVv4tBHoq4H7RSVU7Hl + GawWCu+pSIqJFyVf4ORMn16KV7srMrXzZuRTBUSU2qpvFlnGhYKzER26FZ3VRS5HQfxnj7wgqXs8 + GKlLkdXonCtZxZa0IeKuxdikbu7Lmb/OLtJVi+osFZiH9CidLY8nqV460dJaKbPOq7wdmLpx1Y1L + RTZq0k4fkzfQWbZOiN8ZU6KSkqfoTHS4lEsSbQ3arSsWLa7LBWnoRoWrk4KaoJK65F7ZI7R3YJIO + LRujUtvledcixa2RvjBYPStsa4IY7rGJIxe+dCxdozXXgR7R/wAsq/Gj+P8ARDVlQzxZ4M8WRvWM + ZMjGR2TzXXiCBP4Y0JjVoEiLwOlMdMWkneh4wRlBBBBBBB4jRTT1qf8AR2am3JN/I8kSrtWqo/Wx + ZRaOnTwNdRFJGiDxsqjmztXSR9Khj6aQs5E7wQJxZnxaqn9EaX2laR9JXWfBN2xlIxWRCY6YI+hS + gYyekvQtMHBNmSId6bMqpj6BIkZI97uhYLJqT2hOzKWMmyYrRJVTGEapPLoSTflYPaiYKXgtEE2R + 
OCdkNSNd6bRfneji0idmU5yN4PGT4tUsZJ6yRBFk83rm6ESQ2RnI1g8p9FJA/TxjqpYVWnJbkLWm + NZK/wIkrplZvGekx6HodoIFSKkgjQx2kepQrVr3JGD6a5ygajfBB4iWucZHkrSJCQ0NYx0qec3RI + 1Ar/ABohipEhLUxj30okm1XI9UbF6FkypJrSqZEkQrRsqyQ7QQQRZKyJOSGeI0R16Xk1JMD0L2JJ + b3xoSi0EZqzcHkTZogi0EEHBH60TjOdaxgi1KkSjr065PLKe3UMWKUi6LtBF4F9W3Zsq5IxpuspJ + JxmzI1TtnrLB8kklWLKbL3kybSThJOM9TxPg8mJz01wK9dmycWcCzqf0niU9OliJu3A8nhIrSNxo + nNa5J1wc9JCfsbJGOyxdkzyu2Jj+n/3pzedCwTGxDII7MEbeHapi3snCM39eyY/oS1vn+hPXEkW/ + /8QAKhEAAgIBBAICAgICAwEAAAAAAAECERASICExAzBAQRNRMmEiUCNCYHH/2gAIAQIBAT8Bwxss + vYs2WX60hIY8IWEhkzVxiDJ22JUW0JieaLaLKs8y4Ko8C+yTos1EqZ4yXijITl43TLsWENGnDIrL + XBGHOPNKlmyxse5F+x+lbGXTwu8z6PGuNl0OTbEuCiS4Joi9KrDdC8haY3RZKKkiD0vdqF/exEuH + ePJK5YUW/gov11lCzQ4WLZ3LaysIl0IZJ1mOXGyUC2j8hd5UaxWJTLFPUuR+So48fi1diSXWyity + Flor4CFtkqIjxCNZRexEyOJ94Ss08ZR2OJoEksJbJ8LFCPJ0ePxXyy2h+TNl7FtsvckUP1IW10aR + ke8voU6IZrHkFif8hIXBY8K/RYyfWFFs0GjVLkbUeyXkvY9q98o7qZWFsbo5kKJRJEOx58ipnj62 + zI4kuSKrYpofZZY/Ix9HiwxExFZlJRHqmyPi/ZpSy0V6a9ckJnIlRZy/o0vHBbNQ50JXyLZFDLxO + NkONsuyHY9sihSFxjShi2N5ePx27li17LLL22XtkULhcnMmaooc2yN0JyiKafZVdFWURe3yfkTIX + XJaFihbJoguR79KJViLssaFmtzNOpn43+/TZfqTy3ihW2KKKiuyUosUkPyCcZGlEouJ9YTy3R5SL + TJ9GttHNcEfLfDFIstYkRzQ9lo7KKysXvaG9PR+Z7LG/RRW9Pgea0oSrEo2UjhdHYkmLhFDj9H9Y + jiUW3ZIh2SIodDsVpWadSPwf2aGumKX7FsZWHhb3zuR12eRfoaaNSL9Dyiy9rZGf1hshS5YuecN4 + +xu2WdNDEMkrLwsTQkMiIQ4krSI+V/Y5nLIdZeVhbn6Oi7KxpK23ixliy3QpFlljdkIsqkaeLIK2 + LMpVtjKxYkrErKNVGoZC/s7KGRq8OJxeGqIsvdQ3tZRW/saoWxnO37xQsMkRFhoUCCJi/iQ45Exy + ov6JoeyC+8JplqyHYuyZWNIiyxKymWyhsasXG5DeK93e5lieaHtkIQhlER9kuiP9ir6GrEueSXWH + hEDSxKh6bIdn2SWOCyxcCE6Hihf2PCZZe5bWIRW18iW289GoW+QhMs7xHDQ4klpYpn5DVazwWQfB + rQ5XwiMeSucPEusaaEPgbsXKNOGxCiaSity3ylpFJS2MS2tjuy2hSeGtl7KNI+Bsh0JbXyqz9bVi + iPWXifA5GoihvCrMli9zOcIWUIZ5Ojxdem8XTGIu0MbYhkRobEx+ShSsfIlciC3Ma/fpSFtaJRKF + /EhiRF4d4huZQuC0IeKOi7OyPocyhlk+jSxcYark1WMjwSmNWjX9Yj1iEeRb5d8YW+O+aTEPojiT + sgihrCosvZLFjExSzPoj6tOGjSKhi5ePI/oguCiiUbOUiKEjxxNKF6JXhPZQsQ3zVjRLoh0NiI4Y + xFCIsbwysuVDkzVwXJmlJWRp9e1EuMaeSI44rgRrRLkiTIS4NcnITv0MnRH+Q4/YqNKHSFE6Et7K + Q0XRZF2IkLCyst2LDQo84oSEhxp2hN/frRRMQxFjdM13hovHBFrEGvV1zhxsojGhsoXoksSjzwL9 + MrHOVsckI+x85ootrapUy/SsPDENDQllI+xpUJDs8S9tZr0MvDYsqsLKZK2RgcDw2Vl0X9D6LKNO + UxO9tFYZEe+WX2Mg/r5DK5H2S4Iu3wMSKxyLFp5orFrYx0R6JCGsaTSiqO91iKxRQ9jWNJQzwpr4 + 94tMdmmMj8bi+BLFiLLxPgTNQnso5w2hcj4whjmk6HIsSOjW/sb6rH2N72MXW+H7+Pdl0xq+URf7 + JL9CY0ahyLLRwWNfvCRQtjIyJq0cFEkyPGK5GsIY42UWUaSsViihxNJpZRpNI4H4zSLj40v6FhOh + qxWhpPouh8mk0Mp5TwtzH2N2SdEMSdCkXZGxiePvFfCl2RfxGSL2WNVhcIs1YcbGqwsLd5P6KdE0 + 2yEeMT5FFJYSy8JFep+hiF8NsexYsoazZqGrGRH3i9jFittbV38N8IRQvhPL39jWUhEkI+sPCZeH + E5Oc3tbw7XQpWPyR+C3YhfDfI/TZ2VhFDwih57FhixRW+jSfj9jLzJ/RQsL4b2S2d4TotMawiSEV + hoeOhZpl7LWHYnxsr4FFfF7JLZLKy8JlYZQstYfe6tlL41Yv4tElhE99CXooo/7YWyxO9lbb9zZZ + eK+KxlM4Q3exbF6q5JFiwjsrdea9reyPxmWXeF6KF6X2PkS9DL+MhfCsschyLL2rK9jw7KEPCWxi + WF7n1usTv3tlmob9KKK91DlQ2LHRZeWIr3y63qTQnfstDkNjfqQhe+bKsrEXuebP/pa9jVj3xbT9 + LlQ2y3i/ZH02WWy9rXJwhtGr1y6Pwr79sluuirF6JcFt++PfobxZZe6WFGxQK9FFe5rfBi2WXiUq + G7+PL1tGlf6GLEPa3Xwliyy8WP0P0OVEZqXRfyUsJEet0svcijTW2sIv4zivsi18Z7I9FFEd0sPj + FbEhRzWyiivg1vUVQ7rgU2Rd/DfY8wwkVtWHxvgviv21XQ1wfjZ45f5fDkhlZojuWHihorCV+9sv + attIr1IUFyRdr4UhrgSGhC9LRpylQ0Lj4t7F7KrE24/5oTUla+LWX6WhIkJfLsv2Loj/AMUq+sT8 + n0iE1p5fwK/3DF0eWDl0Qm4Mar/xXWPJGpD5/wDFslDUhwcTSf/EAD0QAAIBAgQEBAQEBQMDBQEA + AAABEQIhEBIxQQMiUWEgMnGBE0KRoTBAUrEjYsHR4QRQchQzgkNTkqLx8P/aAAgBAQAGPwL/AGKS + fy0Y38M+KzLqcJ6f7NH42Xwz+FfwydyR0YLC8f7kvHPgt+BcaJIaJoxtdktz/vs/iSNmVX6jW5df + lMtKuc1VjX8hp+UuWX48EE/lXUy5NLg0v0JWmNkc3EpRavC9L8NjNV9S9i6O35SPAof4liZf5TN1 + 
IE6bdDemr7GV6dSaHYtTPqZfhW71QjmfC4ZPxm/RERYf8RU1dHY8nD4npUWorpwvZYWXuy3vY7Gp + P5e3gsR4LvC5NNyKqfylEdMObRkTIqlrg1w6opWrMzqddRnqt/yP1VFTVPLTEibfuZuC8xk4ubh1 + oivmX6jMiG49C7j3k5azyw94K/QU/lLeKSfDG68Hl/KOqjR+ZEvQsrFy25HTVipixSqOFn4r0seW + pLsJviX6Mqbav0LV6idFVUGT/XcB5XpXlM1OnYy4OxO21i7FwaX5vN2RbQicOjNPxZZC8CwnGPDK + M2hYUmv5XJm5Xc5pOWyHVX5RcOm1OH8StShf9NSq+5mqqmqrREVRa8HK/wDxF8OHPUXE4tLXVE8O + pOkXGSlLVCro8rw5ubszLQOPqR4NcLrwv8LMsJf4ULctqOfy0xr9iN+x2FwaNEX+W7FdLicaq0bI + XDjNXVTdsUu1Cux1b1KfToU07vUyt3V0x1v5fMiabpqUU8Th186fMtmKniU/w67T0GnfhvXsWZ3K + p1ZBmWjIwSxiJLeGWvwblvxLP8vrh8OEiV5mUOiFW3Lkpz8RTTpBRVxfNQ9VuPh0uMx7z/YpT2Jk + b2q1Fw6noV/Bp5qm4EnxVxF0nQ4b4t6eJaoVraGVzDM6TuQaErC5bGUX1IeEUs5lKJSZLfjt448W + n5eBvY9ymhaJXG4zU0atu5m4VNWurZHGo5f101ZkZ6HKRGEFh8WpxSfwOHVVOlTshcPjUumpmVUx + lucHuUVT7ETKM+5EGWD/ALSZPwqS9FJoX4ZEM8/1M2ZMlU2fjgnF31xXgnwx+ZdUO4/5XIlSr1Mf + B/1Kiiv5h5aPicGq6yjp4XBqh6zSP/T8W9OXlZYuPCj/AE9GtU5imuuiK0uh/wBZxuThcPSTOqP4 + bdrbFL/SzS6uiDSosn6snCFhNJJy46izUkjnwLGMc2wmRhPinwW/JTOKjc/1FX6EfEpjSRuyaIor + 5ekShrjUU3/SRtPgk9VBQuHwaamktSeMpoWlFCsTm5qlZHOr5Dh72HNGQ1M2afBHgkeF7kUlvBJK + wRO2GVEMthHgj8zLI2L+x/qKM3m+58LVCfyvoPnv4/QVhUKq/fUU8Wp107NHFjoZuHeqjYeaFJ2M + 2Fi5mqLNGgvBBfw3UnK8crxt+f8A0o0J/U4XcyLanqKp6aMzqpQU08Kha+foLh0UKVrV+kdSqlae + OBJKelS1RVXxqf4itPUp4FL71ehxbKM2xl+Wq9OENFsUsNReBvfC5fwxEl7E0mhfCMJJ8bkmfydr + nVmf5nyrB0VvVmWjmpqK+LxamqKfl6jr49WSit2p7Cpp0XikTPiJcyXNT17ioVDp3tuVcTiz8Xid + dh1P5ilvYgkcq5qPHv4Y/C7F7lnlf5ifwO2FKWzIbvSZqfOj4PEf8Sky8KmluvqU/FdfErj2WHfw + xVodiWnalxV/Qz06RuZlK4aFQti912JXNl1gmlyTvhL3J/CyseZFhYTjYitXNSVEF/z1l7scuY/c + XD6OX6kmfh8tfUy8aOSxK3siuqp3ejIq1S1HwuI99WSorXY1M0wurKY8lKc9yzhKUV8GrzQKmuae + GvlykK0ERPdVIcNVImkl0ulkrXvuW5X0Iw1xSqYo08M438EEVHmIpZvGE0kP81CxphPInJL4lFM+ + r/YyU1V19qaBr4dX/lVD+h8Sni3esDq4izd3crfAhKLNMUrNSZ01PU05X98MzccJddyODRmq6nxK + 6oi+Upy01LiVdHdGZ1fVDdPF4bXSSXlvvMfuS6Kl3gnKv7kpQQxzQvUnCVhchWMubQnCcZx1x0L0 + lsMtSM3DwnR/nP6seZv+yMy4FL2Tr/sZczVH8vLThTRQm6toRHE4b4lT+XzNewqv9LxlTDum9zLl + oqnqc3woMlaVRVQ+I8lP3MlEUKlHJVwakX4qoW1xrgvPxN6uhD4tdNtVVZnPlqf/AAy/sf8AqU9b + k0x608r+hpP74Za1yvfoNarGxzUXIWMPQzUEPw3wnCUyCSVhKZf83CMhJq6q30FZJLtJ/fDkTv8A + Lw7fVmVummnemnT33Z8OvhZUvmVoR5+M+kvQnRVdTLwuZ9R1Rmzan80TlqGqppj5qSaVxan0b0M7 + oVNZ/THvi6Ws1LLJx4ZLPGxOEE4X8cYPDLUa/mdbs5rxc7mZtL+b+xZ/Uu8PUgVdKhfKv6ltb3Zl + dVdTX0KXGXN9h8sX6GV6/Nmeg5rm25PzU6FrPen+2EVf4ZKvT90ZtVv1R1XUjUaiUTS89PQ2VR1R + Kc9jTDyGhZHlNGaFnfDTw6GuF/CnJbC6Mywv+FchFiH4b6EzroSouZKb/qrPNd6sWbWry09e7Ofz + Mb20RVHsU00fNqU0jqqRprY00NMO/UZmQqvqNVQ6N+3+B07q63//AL1OR5alt1Rm0JpM9NqjSE/s + Z+GlPY+HWrman7GXicxmpra7I81X1NSKS6JdKaP4VeWrCEXIZbxS0XLltcIZmpJ28Ek3/F1LnLhl + +pmO5luZqv8A9Hxateo6nv8AZHSxTA30eFK8ElyrsM1sTNyU7U//AF/wT0qOXXphKUoaq3Mr0JVq + upl4qldTNSznctY3qxmn7GSrUlfgZvB0Z1M9IrD6YaYZpseXCxfwWI/CiS5NKuU8GfLq+plmy+5b + QjB4PpSj0LiPU+2NzLFzPUWLxBe/fCaXBkrVyaSKlcs7Y293jbDMtDMnc+JR7rC2MjTwvhKMxZ4Q + y2F98JROFsLkY2OZE0MvqvDBceNXG62HXu7IjuQMuOB4sXrg17DqfUynxKiESz+YWN/ZmTia/uTS + Q/Bof4NMYJPicIuKlal6JT3WM/hRVpjCZGOng0xzGsGuEkmUzrYTWGUleWLGZY20xQz3IGZ68M1U + 5SFhdWRHQ1uS9SS+hFXl2fQjiXp2qM1DL+C9Mpk0wZSYLVGtoNdbPuZalarcTfsyG+UtsR4LeHuS + dV0KrSi25Jbwu+PLqZalDwsoIZqa45TI/K9MWmh0a3sWdvwVU9is7EJYRFkQRhKJW+GSoiu/Df2J + V+GyUdsJGZXoKpaGhnV0WKqPfDJXuRts8JZK8VzlE8NSxJoxKYg1kt4Lo1LGhpclI0LmZE4xuZK9 + VbH4tPyjVWpHj7EdDsjTDthndkWOgxqcJWqIMtV+Gz+R4SRvhcyVYO0mXQp4qwuKSKvbCTmRZmpc + vYiSEQyN8IRdnmL4WcGuFy2ElkXZ2IMrQstRGZFnOK43DXqJp4NDoen7if18V9MKaaTr1eFyF5uh + 8Xir/ihR4Koxz0l9z4NemxlflwzUkPzIlYLBRKGhFQvUgy1WaNcJku5LPDQknHlZqXPQsakELC5b + CCDLAmSzTDlmirqjLxeZdUTS8HxOGpW9JNLwp4q21MvTXxtblXFrJ3qvhlovULi8dyuhCxaGVPwZ + 
ZL/Uy1edEOzRBm+56kieGuFU9R4UoXGo13xuct1+FYbnUszseng1g0sdC7sTJOGuOalTYUe536YZ + 6PLuiw0+hXQ9J8Tgt/8AgqaNPmw+HTqfF479i2F6kjzos5Ohl8GekkTWqPi0Wa1MtVmXRFSinYzU + 6oyvY7klmXKk4fqIXYaY0jQgszmZrDL7FsZLrFT74Wvhd2OV2NDQ0w9CC3g1ZlqGvmWy3Ogng6Nt + u2GdbGV6wT4ctOu5ZC4NHmfQXE4t68bsn4dP0L8Kn6HLRgr3O+MdR0MjZ4Zqb0mWp3IZ8Ju3dkbP + C5lm3rgxR1MxYzR5cJxvg4fhg0Irs+5ovUSzDWYu7EyebwX1L04Th2xnSoitcy+aD4NWFiS5br4J + OUq4n6h1D4tfmqLmnjnBk4KtbGanUyPVEMz8NHw69TT3Im60ZFWq1Ms6kvDla9GXUGTSC9zKZloz + K8USWwuSnPgmBKmrIyaqFVG6FsSWwsXghrFY2IvhKIYm/Z41UbTbCmvoIl9cHTsXIKc3kW3UsoL/ + AGw/sRmInwwSaHpg6Wmj4tFROGfh2fQhmenRmdK+4nGKozX6PBuS+GSumzHTUamp2I8ELwRZdDzN + wPoa/UcaltSE0zuSbSaX8MCxzUlXZFD6rBPqsLj6ISe2D/mYrHMyw/7nphDZ6GbwqoWFVLWEo6GV + 1SsZTt0OpHQjHPmzUoyJ3Y6noRbCGT1wtqWw1vjOHKpfY8pVz3IqrsXi3Q1cHKedi3gf8SS9Lvui + JLSRD8PfwVehr5ahd8YGX3Jr16FkvoamsEyLoMsThbwtY/Fo03E9sXFP01MsyZqnCIy27nI4NIxy + oknYtohuJIg8x8Pi3XUlc1GFvBzIUV+2Gscr0OVQhP6GmGeEWZZEVZrdCVSRTZeB09MZwlamlseL + EZanNIlh3ZYpXe5nev7YamhoW0w0xla+OMcrIw7kZYZSnsXR5EaYdUVfuc3F07HLddhcPhmXCxdk + STEM5LnPWl2VyZqfuWo/+zNKl7nLXWn3Ip4lD9TOqJ/43Jpqpp/lq2NDQhKxBZU/QiLs0OV+9OE4 + w97eHU1NZL6D4buti3JUQakInc7+CI8FvDqSWwXg2xVPaX4IwifqKpbnPXUu1Jaqr6jdOrPK5LKm + PU5qXP8AMrEf4NH6yeZv2JHVTTm6nmuanpeRll6Qjo+hOXN7EOmks/6iiv6kzKP7CqenqSoaKm1K + khKH2JXDq+ho/HubYKjiOFXoz4lGq6F2XqcFmzr3M1VdUdJxuoJOxvbB4Sa+JUiwy502S/seX6ll + BZlTqUKrR4W8PuJrQy1E7HNcz0ttb9UeaaN7G3Q6+pOV/wDyOe3cmRumnUvb3MyZKObQmJgTY4oq + nqK+ivJav7F3m9zKj0JpUF2cup37E1JVdzdFPhmC9sKV+nQyU8R2JxjqU09B2NLompZW+8+Ds8Jw + hprHXCR1YRVoRTSkjU80epm26kUkGxYkuWwc7Gv2J+os+nUy03wqpXWwvm7Db2PTqeVPr2JVLfZs + 0gm/rBuXgdOp5bMso9ylNrmvY0/uObQrIjMR+24rO2Es1NZwdfyoin6k+DuSyUsJxmTM/bDuQjXD + QvsQZiNFhlaMtdLtqeWPQvTxFTtVJHxJXSpHI4jXchq3VF5wtS/VszKPoNVNayXk0NGtiy+pMfYh + 27YWYk/3FLhsh1RKPsN0lqWm10JJy+5mVSXqcrofuTVQ0Rlpf/Imq55fcr0l3PNoXudsZ8prhy3f + Q0uPL6n7nRkJo1T2L1aD7kCXgfd+Ox8Sqy6DrqtRsjl/7fXqdlthLwthTRutcL4KpUy9DT0MvxLN + aC/hLuf+2PcdVNNM7ol8M5VYvKNmdyXcjMluTOnUS16tGkeiOVZat43JSTgsnYUpH6h2V0NVPTuR + l/wRljM4vpJlSU7QL+H7FpjudU9ZOR6bCcaCtVcgzVZ49LExJT26nJrNzySaaGhbYnqPcnsX0Lai + m6J0k9Tb3Km0KsmMH2GZX5upl3WqGt9sIZ2w5VKF8Xy/pRbb7DlEkjgSgn6E1Fsbl5ZGSn6EU/w6 + 39GNOLdMERUxw37Gn1JLVVe5zfUtBmZZXJq9RseedRPv7YXm/cbp06PUbpt2FNMpap7i4aTtcjoZ + Xt0JVu409UdzpOgrbEbUiURFoM1K5atuhHQU0nLLREY3ZoVGmu5JtjOpU9GyqnoxMjD1JptVToZv + LUtUxOipZltOMkkbGZaGam1O3c7mZuZwdWyFBC2G1++KwhqH/QTWpEQ6tyId9y1KHHymsmpFSlHK + /Y6Gom4mfqKmP8nU6EF63PUcX9PB/wAS1GwpH9yVUiKp9zPvEEq63sZXvsR8y0KnvI+IusCq/nF2 + 646QSSzQthFN/UdS4eSpfczJX7nQuSsJ16luuE+w6tlqW0gv7M80iHi30QqI2MqMlCnuXMn1FTQL + 9T0FqibVHQhiuql0HZaiwU3nYiMpm+poXXuWR0FozRWITf1E5cDykPcWV69Ty5n+4lV7mljueU5a + W/YV7mopWq2N7dT3NIO41fuafcndMv5atSeI7ObD+G7J6FMqGN1eyLUtr0MzfsKCMLbCMrsOemmP + cjLfCN4HRsZidnqZKtSEx00eC5Hcm3Rjadi9d3qRT1JZn+aq7NJZnevQvsh7ZjU6dhWj+omjSIMq + dy1Eog29FUXS7dSczk0+gtzSX1JeDWb2g0uTEexr9hKl0/8AIjPMHY1Jj1g8vqa2Isjk3w7dcPXC + UpFtJG5v9TpsShPqsNFEHMz+pZb6ipnB3iLHWo1LH9z/ADhJrcy1bEV3pZmoc7GpGLexpr9xbCKs + 7EaSyI5RJWLib0FH3JqMs2PUtEzoQvoTzExfqSM9SKEvcu0uxGb6F0SjrJp7mjLUL1IVja3QiETO + Rk8TK8i3I4dFX1PNEjhWZcipwOjh3Wqgurbpl6dP0jcQS2rM8ysd8IV3g316FtC2MSJbmkDzKMLV + dyNurNWzKlzdWVUz5XGMLwJuVPYuiErkRBdwjln18M9+g/XwTBpfDU6WwjTuOmpz6EYTuQzymj/b + B0KEZvujt3FbXRyOhU+rL2sRer3LNc2pL1Hmq5KdkVU2J0kooTnqSko7mX6zoOuYkdfzDqeuHNdj + ppTnSTLVRqf4NTMSXRS1o9UNKz74f//EACcQAQACAgICAQQDAQEBAAAAAAEAESExQVFhcRCBkaGx + wdHw4SDx/9oACAEBAAE/IWVLpdDCbR/8HUhB8FEUYMoVi1iNrn4KzHEB8CKKpVAIBUJcz1AipQIU + EQYoITcai4I22RgqNYFID4BMGrhbipSqGGyYPxr4yzHEqvjW5Q7og3zB0jyfCmCGzqMI2wMTEoAz + eGOIs5gy30ZsBfE9TOSyUvLPA+B8PFKuOIs3DLowQQfAMIJpgXGkIQnwliOWoMcspJZN5pKuYY7O + 
VS8yqHLHUUK2EbI7mEpDuEjBrMa7YHwa2qJqGdSu8tHu7gdy4xsi1DMLKqIOYoShM5n3JT8jDL1P + ohdFXHf85PcBHMA3eWJHqQ9vxXwNS6i5jaOIqyv/AANpczTMKPxSuEAg5hVcBDUpCjCpLv8AEkLI + plN4jJIeIRSi05lyqYxLhcacywlRYGNAJ2/+FVlJqGT4F0IbEzBLKqhCYFsTHiYAyj4Mzmo4LqOg + EoSs5OpY80tpHo1B4Z+Jakvg+LzWbmosS8wmKYsPnGksnPwCBiJBpmsYvahKl/8AivB8P/gNfjyl + YjD41GfiqYQrEmooS9wI/Gg/GkNLFaal4PHcwzgEZ2y8wYkT3qCmmeaXIz6sOtty6lChtmmDZQ9Q + hiPxFYglEc5hcWWQgrNQ+AgRsLC/E4E8JxL+WZLiYRtRLgfMRMkUjmJn4HxbK0oUbPc2wEkNURZD + cFzaB8ZwKmPwau1lkVTFm8bbCWBVOaX0IvdYlCvEbS20WBbHGK4aTBlbocyl3PqWZl0uYVz/AODK + ASqwjgjmDp8zcCMX/wAT3GebMYD7qUPZh1A6tGLHI/HxFRivrcRd7DNwXcr5JvRV+5lwndfHDFtj + HojNFVymCNsDuPLYvEuM29eJeIJBU5hqWZlhLlxr4Zq5cZQZivFKgSkV1KVUNY5zEKKqZQZl2YQr + 5FmUtcekSNyrXusS+2V1FDMQSL9zahcKvgIfDlBXwWWYbgTbKojGcX41qXuDO+pU7aGOoTJIYlfh + 3FuPCRMoJt4diKZYXDsg8LfcH8N8/wABMS6+C5fkvQofSy5WD1AiCjS8k5k+chF2CzfUZuClpmMA + TJ2eJdV3ts/WMF4HPDFR4F6lts+HgiVEl/FcUf8Agyfgsm7EqEQELq6nJKnOoWwYKgfyzwiXK3OH + LIbwYEqxCDiRw0zD3X3EDEVLKo8JjSpLVKYWhuUS8TeFsd+DGOHwoymZ4yqjWlcRU8D7hzhbM9w6 + IY2HjxM4AvPMQIOSWw96IqpnCvMfw58oaso5dQAi/BzPoTFzBA6u+khh7f4Ztp0UnYnUvkfkqDLp + 97IpGKxlx6gAnRaX9ie4IqLQkbOsdzDYfiABTEF8R3/4mn42aSsygYjmWyykwCEpcrMJTS8yvmmN + cUI4iMNWlQHmI8kWWeUOkK98EW1PDK7k4l+5MI7ZdRQ4gxFfjrgjiKZwDaZKmOBDEtAlb/hSCLe2 + IZlrlTIe8DBG1m1RhxESsgcvmCx1PCaxvpVy0E9pgin5mkXechYzxzKdXpX/ACY0tDmFTyGvEojh + TfcNxrV6/wDkVxWGRp9ZsEZx5gAhclDGOzHSpFqmU9+yUXwis2IwYJzx8Klkb5YoruJEveZxDcyV + EcksumdzoMwEho88TUW4ry38LKFm+IyjUD7SZcW5JWStbngZNofwBZZmXI3+cqTRBFMkxzEXLinM + 4I63KsMk1sqqelcuFI/cvzI8TWQOXmBGJuyAdTTzaMoSikRbOMQIjgEFFILCUbLZeHF8QdScMB8e + GNMyUhbcK0lxxCj6AcTYaPHEKl1jRcTzW4P9Uo7S65qcoF57MS9wwFsmcJolznHACUGqJWnEyhBc + ymSKxdwBj4YYs6sdy5dLL0VQL9IpNqJyYDXME4l0VHUOV+MpUXmmBXEhmhyQykRle4dalIa+AEpN + aXw/aXqZysuUMbotfNzIFnalDzFDbbitN06hYtWYMvq08vUVZXN0BHdDzufvMPz39kv5Yt0bpF2W + 2zKaepQSbX7E5h1swnNyphjjh6lmrI4ri4Bq7X8kubjhgYX2xH/x6OJXAiwQWo2qMkmwVmYYxCy6 + alk3A2n1Rj1gWvM1uPM7R94C+5qsNyUPw1BNYhGDEEHT4EKXDNTfZcc5mpXL8DNnUYYgm8kCvVMt + 8HZ8DOSWZSwcVBNiDO7MxDAZkxLmTEE8cwYlyvcsNhIE6O+YCurz3LQuEm1qXftCOu5HUacAw6j3 + +pdr0H1zAMhcqesQbNkt+4S9FVByl56OPxLGV6PEHbM8cHmdtIMYxSwIyDORh04g51omCcgcZmDt + yyZwgrrmJA8mXS1RBmXFu2pQ412Ii5OoNtz1NJmNWHimACIGDUsmblIVmWWiZF2wMplcq+ChJ6N+ + AYuXGJmIgNsyYGcyr1xHMxpmDQN/N0xQvqNCAlmmEBwhTBHqGPNTNR0RLJlL8Ipmj0FDq5xOeYyV + fGG7i0pQhyEB6jCpphn20SlijSAPcNbix1EnhqEBjeWCxwjBBVamMw1bz2wQn+1HmUAarGFgZoeT + zGBNLzKtQax3E7PJPxQjqwDVvCZdrmK4WeowIy09T+ss1zgJEr6QMxJuWWh4JQTdPTLS8/UuFe4Y + hcx3cDFAyHwBs5lbwcJW3K3EwpNR4yrPMwIdEqO5pSyh1EvZyS5f4o3KkxzK1mXbCbI+JRVzNlGI + bM4HwuG5hLqVs2eoK1VK8s0XUVTyKWXnIT8SuSqaa/5Lls8ySVLuqHnUTJbK48RzZE9p3McqJkpQ + TkPG4ljVY/VlrRmB6qMHRgOAajWZcM7qHZAVP0i+p6gkZWLP+Ro51AUJRDPMwbYYw5g3Fh6Ki7Eh + 26qYww4GWAvZNhjzLlROElFLjtOUniG2UIY+OqRNkcGEZxAOYDNviCiuo7EMsMuNTIRuCsS2eP4q + iMkWIjhgUYhlFcPw0kpHklCW6im/xMJbCOtA/qNvPtnu9cvasn7/AOTGi4e4wP0koTU5fgcfRhHw + EaVcqrYUPUAVUIhFHMVIva9yE4aOsT9Akfp/cvZbCVOig5POJXAosRmbNdmpnZp3UfQ5UuYlR4mY + jA41PMXCnJuUEAUN4mRaszcOfjAII5cXU7W3ZDCMsFc1L4YjoNR2DTErbTQ7g7EkSPFKGaWfWXW9 + R4MoM1t5lvRBaakvBVJUJIbQbSXamGhNz4WdyjDLItcTSUMzbilK+Jl3UFh+SaZb5ZmxofmNKKL2 + 6jKxLUe042DAzIS3A8pKmGDDnXiPH9HiXUsECvMfp9oVa3xMsRshK4Yig6YQzJ3UZ9EBrzHWw2nP + 0hdf41VwBfLmo9TNeMwGSP0g2bTgpon1B0SF7eo0SqzEcXFe4MbBKwW5SiVU2in0gXmSnGB4YOGo + JoTggQJZZhLNI+Int8YNGD6YMhNyfKwPiOURO8mQzMWpcdfxZgsy7uAVJgXHESposCtzDEmWY0fd + h2P13EBjoNq8ExhnQxzjX4SezFjbN8mvMXvBm0P7ZjAGCGJSXv426cQY1BeTUJDy4iBBrJEBAtMp + OvCSuo74/cHbH1iAETUjDDQ3+iKCt4jG4PFx9nusuIpqsQDAhi1qaJxskuD8X76TOQyhNu4PD4MZ + kTZsiZaJhBi4IZ1+tT1aFyL8CpfihW+NtXcOcXiGpmdqVibGlx2QyYipXCKYqMGtQBcbmJaOYRNX + 
AwSiOpf4uXVTzg5hnZyV/ce1SC9Xv8Qiu7Oe4TQGVNzKC6xH9y07ql2O5SDdBj/GJgUWI6x+peyU + bXA1EYP3NhwNzrasjcM2ReLA+uRZkOIgWukOGrXuY0dJY7CnmWukTmDyOYoC9eJ9l8wBQ5cwYlw1 + LmoxbAWImMSl5mswRe2ok4ipmYy5QqsRgLXuZk35I8h6lyZizWbzFlCccEaiTUzbqWMDCjBeSYJe + 3wWfFgq5hmD+JeC4gszERZKCsEwjMo7h4wY4hcta/aXq2lPicV6nuWnBEYU3U6eYZXWt2schUunP + hBGtManiDxNat+otQimt4RE9JCqypzwFvcOo01La79E1DRag/fEc3BVyV3DhBzTOEhy4ipm/rBGR + 42mKMsWfiSPkttLiyVG9EduSCBk4l7UFozxLm7YZkm0alMnkmDIcyqXeu49fXCEPDDMSrm1hv4Rx + uIhGTEz8Lr4MEujzMj4KyahsSjUBUBF0SlHBdGAXNwjm1XzklO13tQCs4DvjfqXn2ziFyBq2Yl5D + dtzbovwZjpxz2puNgdsbmUoY6JS2PmTnDxULCJwuhtJrCJaKxB5gJD+YmvjAoupRYIxEuh/xNxMW + Gcpf7imr7Y/MBYHdafMOn/UIGUrc3xCS5qIHbMiKJpBQohOOfXxpOo7jUtqbLbBp3TBF3IBMFx7C + NlUvIKjVkohFx7SRyFWbCFSU9RWkZw/rLypZoilmDmWzeajmAQCmCMkFiMNPg+Uuufj0GJT4Tlix + UcH5F6hhb9JkeJcNge2XMqstwfQziPKt3+f4l8lXRR+UpECrZf0wkWWQq7Dvx+5ZlIMF6lCIu70Q + hYDBzcEvojj/AKljQ1DCUUO3lAJbNph2N0txYXBX5CX9QXf85X6jqBrYUf7xA/hkb+TC4l4T/EwO + bOuIxcOk0x+sumX5Garw+ZSdpdMQ3JU1JTZp4jBMOZUtY7MRxWnXwFgEqYWzdYgKrmVdIZ4hygmw + gOaSzSxhpKgLRzS7lZ2dErFnwEqKlMrHjblvxxGYUscwxLioiu0yQYqoOZSrKQu4sJOJFjSnP3OC + BRRTsMBEUiynNeAN/iWbE2CflTAQ8kED7UQHeQKJSgGGj1u/sRq5wv8A8t/txEX+oV6+0pi7QRUQ + MdLiN/WeGOaUW8jqOk4BeiD+oixr1KTm6Cv4lAK2xTcLf2g/3Eyh8wL7rMbwN/C1++/rcK1da0n4 + faU8jsJUAKH7zILj4L3xEVei25oqWsViNcMnE6B2IFOCH1+JQabljlUuXmY5jumGjuLdE8DHgO4L + WciPDU6Bm4SnJc2tcxPuDNMde8a4qp3IErTeQXwFoMFRFzAlr4RKqYEu4YmkNzU7YRJYiJE3ljkE + xUR4vUqq2dxLRvtmReWugebf6g4/rwfvBzS1yrZV7bi6JcShvno/3pQ3nZF5/sZ+0uppGD+lKLYC + xTT/AORoaUNu5f8AYYYJfMmBeZ1XoEJh8mwvn+/tAXtnL+h3X7mqo2FX7r9n1gGww8qGHjm5h014 + lYEAf64kXnvv3HZ/zDhSYzSP8MQDeKkaMcJMiBfzHMglfhA2yRld1Lk/pDPqYNbC2BMFx9cdZTo/ + eUGGyXvMFMDDLMIwLKltyjIQVh6l7Qh9liDDoXKy47hShlliY4mozBA5hVFTEVUoYr5MoRKlcybg + tEcG5bLFVMZ4O5bniP77fEShumDqXm2uV11/aM7A+j6TMqZ72x3YA+0TV1b+UEZeVglAb4B7dv6h + QWXYzDFE2diZBSwVgTCQGhVl9QqB9AwCZVQH8zAnteP9/tzMihwfuDBXHZxHFjOl/wADOfJh/ojU + CvA/RAda/K/qWL1ddxmlZ3kjqvtmSHF6GpfDJ1CKT7bjEWEwJedTLqrzKQ1nMWA1uBzBRMHidRSM + rQNkuZu4J1+Ei3UQ0y3JcDfCoGiKEyZTLkutVNnibRKC3cqiIVUbgjV8NhVcog4ikZVx8EawbRi5 + EpeGUTemkoJaNpUSXDMTDGqxDb0ZYm2LwJ13KhC+3V/zLvKcpz/yFgW53ULAW1rv+B4/+zc2n69f + 31LdtWlgpeiP91HXRt6H/wAZRHBLiWdxV3RgdRIyrXmJ1y34j3sKTPmBzQ6fuKw2VDy5jG7eK8+Y + iwhsX9vP5ER6auwVp7E1y14l3kG9ncdkGht1EKlI3UxcBzA1+725JWshntEGEO+YjWlcrU4gPJPE + uwZtY4uG3WbtgnBcSyP0j+4kuPMDMnvtIiLNcQuMEAQmIJS4koyQgN+Ic8uABgMD+NHyvWM0SpZ6 + iXqsckAnkgMEAcRWbhwt11HtMiWZ0VU5ZK1sN3MzMTO1OF+ILQzfOJmEctoXynPaBFrEsOOVGnXj + 0dS2R0wxRcU5zubi5KvlWD/fxAylZi3dmsfh/X3lS6H2Q00vP++kYU4D1QSg+pemCl86jNsalLPJ + PD+5c4Pj9wUVpcwLkOJQvJ7GIQVO2KAArL+4937fttbnscI3uJcno2zvVdwu6OIgLHhk8znHsQu4 + DQ3PFYVhHAeyM5jYKwEW1VRpgmjYqKw3KFQMU+w7hWDJwRhsmNWEjuXwEVS/RgFcTGM6+SLwiU2a + OSEFMYik4ySoaO4Zyt1Mh3MOKBKIlWoOI3Zf0gqXqGWopFyIrCFphIgNpqUuo3tXxDDqHeKo28RI + G/nEpQt74m0L7mQ/eu4WdYbtuf8AcQ6tMR5dxAYY6ntm2GwfUvyDFpPcLg2SUqHh94p2pcn4lCu+ + JQ3BAPabX3+ZaC2nJKc0htcjQ7hhUrzK49YBD7R0LB1MRm+6OWPtNmEcJA5jN35lSUbzEUMWopFK + OOyKIqzkmJM+WDUqSjLbZAKwIgynDMZdPUXlK2ZxdwwngZmOnITEGpZCG5zBO6WXTKivwGEYeEOu + MBEqleIBdiUNrDO8ib14hCL1xKyEm9StEv0Zg1ywvW8R6CpNFzkuXKoG1fxC7NRyOXULT8yruKEZ + FMa7gIxhvHL/AB95VHK0xA6EcERLVr7j+Yoi6NzJvywoVqpX1MyusUmKg4n1wjqO9C2epWa8YlQN + dEpwGMQMI8e4mG6c9kJjJ5JhhgKvBqaidGFtd8RP4WeTNwpwmJcK2WDaxjhJ5IN9oPiCpVzBrZxG + YLH/AFS7G7ydSuHJ3Kg5RrkmNhBmxPcLyImSbD4YnCJUOEq9ypZ9xwa1CsuHGomtGCyZmGqlKKMW + ICIUwBp3NwLZjzB04meG5QYHmFzYEpVmEKimx+AXgamXlUYnuMXYu5drVS1NJCNIaeEovO0MAA6q + J9JelMtyqla/38TERuHzMzrxGuuoocPEsThdRxqb19YTBjhmwBmmGgqpWt1Evs1Pf+/iZwxlHLee + oA+hmlI1rNTcrfshtu/gihTL2KlW7xOvM9vtGKAwB9UXYpPNzAB9IYNh+pzMm0Wqj7CG4jR0gkY4 + 
[... base64-encoded JPEG response body elided: the recorded tiger.jpg image bytes (62,971 bytes per the Content-Length header below) ...]
+ headers:
+ Accept-Ranges:
+ - bytes
+ Access-Control-Allow-Origin:
+ - https://huggingface.co
+ Access-Control-Expose-Headers:
+ - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range
+ Connection:
+ - keep-alive
+ Content-Disposition:
+ - inline; filename*=UTF-8''tiger.jpg; filename="tiger.jpg";
+ Content-Length:
+ - '62971'
+ Content-Security-Policy:
+ - default-src 'none'; sandbox
+ Content-Type:
+ - image/jpeg
+ Date:
+ - Tue, 29 Oct 2024 18:11:03 GMT
+ ETag:
+ - '"3875afc946bd1cc6b4305ff74045c12164f95326"'
+ Referrer-Policy:
+ - strict-origin-when-cross-origin
+ Vary:
+ - Origin
+ Via:
+ - 1.1 d03f5e49ef8a75531152544d3c363680.cloudfront.net (CloudFront)
+ X-Amz-Cf-Id:
+ - VIdhsGwYf3_gtCNux3kU8T94IbyUI9E0CITcIpjl-2yuSNq__CI72Q==
+ X-Amz-Cf-Pop:
+ - CCU50-P1
+ X-Cache:
+ - Miss from cloudfront
+ X-Powered-By:
+ - huggingface-moon
+ X-Repo-Commit:
+ - d6d15eabfd53d7a15dd553513e33262f320b210d
+ X-Request-Id:
+ - Root=1-67212537-5cb25f464ac51a9617e55e79;dd55291e-a91f-45dc-9e5e-c2e98ef87a4a
+ cross-origin-opener-policy:
+ - same-origin
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate, br, zstd
+ Connection:
+ - keep-alive
+ X-Amzn-Trace-Id:
+ - a7327634-8511-4639-b52d-b0146d00c777
+ user-agent:
+ - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1
+ method: GET
+ uri: https://huggingface.co/api/tasks
+ response:
+ body:
+ string:
"{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification
+ models take an image as input and return a prediction about which class the
+ image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image
+ Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K
+ is an image classification dataset in which images are used to train image-feature-extraction
+ models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A
+ strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A
+ robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong
+ image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong
+ image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image
+ feature extraction is the task of extracting features learnt in a computer
+ vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image
+ Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene
+ segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average
+ Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for
+ each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean
+ Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean
+ Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the
+ overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic
+ classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1
+ is the Average Precision at a given IoU threshold \u03B1, for example,
+ AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid
+ semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background
+ removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose
+ image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful
+ human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic
+ segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A
+ semantic segmentation application that can predict unseen instances out of
+ the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest
+ segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A
+ human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An
+ instance segmentation application to predict neuronal cell types from microscopy
+ images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An
+ application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A
+ panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image
+ Segmentation divides an image into segments where each pixel in the image
+ is mapped to an object. This task has multiple variants such as instance segmentation,
+ panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image
+ Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic
+ dataset for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple
+ images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak
+ Signal to Noise Ratio (PSNR) is an approximation of human perception,
+ considering the ratio of the absolute intensity with respect to the variations.
+ Measured in dB, a high value indicates high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural
+ Similarity Index (SSIM) is a perceptual metric which compares the luminance,
+ contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned
+ in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful
+ vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge
+ vision language model.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small
+ yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong
+ image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong
+ image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision
+ language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful
+ vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An
+ image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An
+ application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An
+ application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text
+ models take in an image and a text prompt and output text. These models are
+ also called vision-language models, or VLMs. The difference from image-to-text
+ models is that these models take an additional text input, not restricting
+ the model to certain use cases like image captioning, and may also be trained
+ to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset
+ of 12M image-text pairs from Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset
+ of 3.3M images from Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed
+ description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A
+ powerful and accurate image-to-text model that can also localize concepts
+ in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A
+ strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A
+ powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An
+ application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A
+ robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An
+ application that transcribes handwriting into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An
+ application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An
+ 
application that can caption images and answer questions with a conversational
+ agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning
+ application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image-to-text
+ models output text from a given image. Image captioning and optical
+ character recognition are among the most common applications of
+ image-to-text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A
+ dataset of hand keypoints with over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong
+ keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An
+ application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An
+ application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint
+ detection is the task of identifying meaningful distinctive points or features
+ in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint
+ Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small
+ yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very
+ strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An
+ application that combines a mask generation model with a zero-shot object
+ detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An
+ application that compares the performance of a large and a small mask generation
+ model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based
+ on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An
+ application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask
+ generation is the task of generating masks that identify a specific object
+ or region of interest in a given image. 
Masks are often used in segmentation
+ tasks, where they provide a precise way to isolate the object of interest
+ for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask
+ Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely
+ used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task
+ computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It
+ is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The
+ Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean
+ Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average
+ Precision at a given IoU threshold \u03B1, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid
+ object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time
+ and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast
+ and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard
+ to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An
+ application that contains various object detection models to try.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An
+ application that shows multiple cutting-edge techniques for object detection
+ and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object
+ tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very
+ fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object
+ Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the image with
+ bounding boxes and labels on the detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object
+ Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark
+ dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing
+ Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong
+ Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong
+ Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An
+ application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An
+ application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video
+ classification is the task of assigning a label or class to an entire video.
+ Videos are expected to have only one class for each video. Video classification
+ models take a video as input and return a prediction about which class the
+ video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video
+ Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A
+ famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A
+ dataset of aggregated anonymized actual queries issued to the Google search
+ engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which
+ name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The
+ Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact
+ Match is a metric based on the strict character match of the predicted answer
+ and the right answer. For answers predicted correctly, the Exact Match will
+ be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\"
+ The F1-Score metric is useful if we value both false positives and false negatives
+ equally. 
The F1-Score is calculated on each word in the predicted sequence
+ against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A
+ robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small
+ yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A
+ special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An
+ application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question
+ Answering models can retrieve the answer to a question from a given text,
+ which is useful for searching for an answer in a document. Some question answering
+ models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question
+ Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A
+ curation of widely used datasets for Data Driven Deep Reinforcement Learning
+ (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red
+ traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop
+ the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow
+ light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated
+ reward across all time steps discounted by a factor that ranges between 0
+ and 1 and determines how much the agent optimizes for future relative to immediate
+ rewards. Measures how good the policy ultimately found by a given algorithm
+ is, considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average
+ return obtained after running the policy for a certain number of evaluation
+ episodes. As opposed to total reward, mean reward considers how much reward
+ a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures
+ how good a given algorithm is after a predefined time. Some algorithms may
+ be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful
+ when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This
+ model works well for sentences and paragraphs and can be used for clustering/grouping
+ and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A
+ robust multilingual sentence similarity model.\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An
+ application that leverages sentence similarity to answer questions from YouTube
+ videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An
+ application that retrieves relevant PubMed abstracts for a given online article
+ which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An
+ application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A
+ guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence
+ Similarity is the task of determining how similar two texts are. Sentence
+ similarity models convert input texts into vectors (embeddings) that capture
+ semantic information and calculate how close (similar) they are to each other.
+ This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence
+ Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News
+ articles in five different languages along with their summaries. Widely used
+ for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English
+ conversations and their summaries. Useful for benchmarking conversational
+ agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The
+ tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey
+ building, and the tallest structure in Paris. Its base is square, measuring
+ 125 metres (410 ft) on each side. It was the first structure to reach a height
+ of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest
+ free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The
+ tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey
+ building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The
+ generated sequence is compared against its summary, and the overlap of tokens
+ is counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers
+ to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A
+ strong summarization model trained on English news articles. 
Excels at generating
+ factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A
+ summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An
+ application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A
+ much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An
+ application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An
+ application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization
+ is the task of producing a shorter version of a document while preserving
+ its important information. Some models can extract text from the original
+ input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The
+ WikiTableQuestions dataset is a large-scale dataset for the task of question
+ answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL
+ is a dataset of 80654 hand-annotated examples of questions and SQL queries
+ distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No. of
+ reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric
+ Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What
+ is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks
+ whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation
+ Accuracy\"}],\"models\":[{\"description\":\"A table question answering model
+ that is capable of neural SQL execution, i.e., employing TAPEX to execute a SQL
+ query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A
+ robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An
+ application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table
+ Question Answering (Table QA) is the task of answering a question about information
+ in a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table
+ Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A
+ comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood
+ Pressure\",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better
+ performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation
+ model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very
+ powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small
+ yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A
+ very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong
+ text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very
+ strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to compare different open-source text generation models based
+ on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A
+ leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"A
+ text generation application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"A
+ text generation application to converse with the Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A
+ leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"A
+ chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating
+ text is the task of generating new text given another text. These models can,
+ for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text
+ Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps
+ is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual
+ Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A
+ city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ Inception Score (IS) metric assesses diversity and meaningfulness. It uses
+ a generated image sample to predict its label. A higher score signifies more
+ diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet
+ Inception Distance (FID) calculates the distance between the distributions of
+ synthetic and real samples. A lower FID score indicates better similarity
+ between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision
+ assesses how the generated image aligns with the provided text description.
+ It uses the generated images as queries to retrieve relevant text descriptions.
+ The top 'r' relevant descriptions are selected and used to calculate R-precision
+ as r/R, where 'R' is the number of ground truth descriptions associated with
+ the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One
+ of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A
+ powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image
+ model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A
+ powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A
+ powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A
+ text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An
+ application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A
+ powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A
+ gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An
+ application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An
+ application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image
+ is the task of generating images from input text. These pipelines can also
+ be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K
+ hours of multi-speaker English data.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker
+ English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I
+ love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The
+ Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated
+ speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A
+ powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A
+ massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust
+ TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A
+ prompt-based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An
+ application for generating highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An
+ application on XTTS, a voice generation model that lets you clone voices into
+ different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application
+ that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An
+ application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech
+ (TTS) is the task of generating natural-sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for
+ multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft
+ Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101
+ Human Actions dataset consists of 13,320 video clips from YouTube, with 101
+ classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality
+ dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A
+ dataset of video clips of humans performing pre-defined basic actions with
+ everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This
+ dataset consists of text-video pairs and contains noisy samples with irrelevant
+ video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A
+ dataset of short Flickr videos for the temporal localization of events with
+ descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth
+ Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception
+ Score uses an image classification model that predicts class labels and evaluates
+ how distinct and diverse the images are. A higher score indicates better video
+ generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance
+ uses an image classification model to obtain image embeddings. The metric
+ compares mean and standard deviation of the embeddings of real and generated
+ images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet
+ Video Distance uses a model that captures coherence for changes in frames
+ and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM
+ measures similarity between video frames and text using an image-text similarity
+ model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A
+ strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A
+ robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A
+ cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An
+ application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent
+ video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A
+ cutting-edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video
+ models can be used in any application that requires generating a consistent
+ sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score
+ ranges from 0 to 1, where 1 means the translation perfectly matched the reference
+ and 0 means it did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very
+ powerful model that can translate between many languages, especially
+ low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A
+ general-purpose Transformer that can be used to translate from English to
+ German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An
+ application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An
+ application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation
+ is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The
+ CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with
+ 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images
+ of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number
+ of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The
+ inception score (IS) evaluates the quality of generated images. It measures
+ the diversity of the generated images (the model predictions are evenly distributed
+ across all possible labels) and their 'distinction' or 'sharpness' (the model
+ confidently predicts a single label for each image).\",\"id\":\"Inception
+ score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates
+ the quality of images created by a generative model by calculating the distance
+ between feature vectors for real and generated images.\",\"id\":\"Fr\xE9chet
+ Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image
+ generation model trained on the CIFAR-10 dataset. It synthesizes images of
+ the ten classes presented in the dataset using diffusion probabilistic models,
+ a class of latent variable models inspired by considerations from nonequilibrium
+ thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality
+ image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes
+ images of faces using diffusion probabilistic models, a class of latent variable
+ models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An
+ application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional
+ image generation is the task of generating images with no condition in any
+ context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Tue, 29 Oct 2024 18:11:04 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 11449dc59e84720c51d6a0c2548907ec.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
LZmOzbuN3F53BLzrKwaZgs-ddSvC1rdBoyI38hlMT2o87eyhID2Ukg== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67212538-5efc4ac74a0ae2af68c672ba;a7327634-8511-4639-b52d-b0146d00c777 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"question": "What is the animal doing?", "image": "/9j/4AAQSkZJRgABAQEBLAEsAAD/4QB8RXhpZgAASUkqAAgAAAACAA4BAgBHAAAAJgAAAJiCAgAHAAAAbQAAAAAAAABTaWJlcmlhbiB0aWdlciAoUGFudGhlcmEgdGlncmlzIGFsdGFpY2EpLCBhbHNvIGtub3duIGFzIHRoZSBBbXVyIHRpZ2VyLndyYW5nZWz/4QV4aHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIj4KCTxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CgkJPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczpJcHRjNHhtcENvcmU9Imh0dHA6Ly9pcHRjLm9yZy9zdGQvSXB0YzR4bXBDb3JlLzEuMC94bWxucy8iICAgeG1sbnM6R2V0dHlJbWFnZXNHSUZUPSJodHRwOi8veG1wLmdldHR5aW1hZ2VzLmNvbS9naWZ0LzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGx1cz0iaHR0cDovL25zLnVzZXBsdXMub3JnL2xkZi94bXAvMS4wLyIgIHhtbG5zOmlwdGNFeHQ9Imh0dHA6Ly9pcHRjLm9yZy9zdGQvSXB0YzR4bXBFeHQvMjAwOC0wMi0yOS8iIHhtbG5zOnhtcFJpZ2h0cz0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3JpZ2h0cy8iIGRjOlJpZ2h0cz0id3JhbmdlbCIgcGhvdG9zaG9wOkNyZWRpdD0iR2V0dHkgSW1hZ2VzL2lTdG9ja3Bob3RvIiBHZXR0eUltYWdlc0dJRlQ6QXNzZXRJRD0iNjI3NTQwMzg2IiB4bXBSaWdodHM6V2ViU3RhdGVtZW50PSJodHRwczovL3d3dy5pc3RvY2twaG90by5jb20vbGVnYWwvbGljZW5zZS1hZ3JlZW1lbnQ/dXRtX21lZGl1bT1vcmdhbmljJmFtcDt1dG1fc291cmNlPWdvb2dsZSZhbXA7dXRtX2NhbXBhaWduPWlwdGN1cmwiID4KPGRjOmNyZWF0b3I+PHJkZjpTZXE+PHJkZjpsaT53cmFuZ2VsPC9yZGY6bGk+PC9yZGY6U2VxPjwvZGM6Y3JlYXRvcj48ZGM6ZGVzY3JpcHRpb24+PHJkZjpBbHQ+PHJkZjpsaSB4bWw6bGFuZz0ieC1kZWZhdWx0Ij5TaWJlcmlhbiB0aWdlciAoUGFudGhlcmEgdGlncmlzIGFsdGFpY2EpLCBhbHNvIGtub3duIGFzIHRoZSBBbXVyIHRpZ2VyLjwvcmRmOmxpPjwvcmRmOkFsdD48L2RjOmRlc2NyaXB0aW9uPgo8cGx1czpMaWNlbnNvcj48cmRmOlNlcT48cmRmOmxpIHJkZjpwYXJzZVR5cGU9J1Jlc291cmNlJz48cGx1czpMaWNlbnNvclVSTD5odHRwczovL3d3dy5pc3RvY2twaG90by5jb20vcGhvdG8vbGljZW5zZS1nbTYyNzU0MDM4Ni0/dXRtX21lZGl1bT1vcmdhbmljJmFtcDt1dG1fc291cmNlPWdvb2dsZSZhbXA7dXRtX2NhbXBhaWduPWlwdGN1cmw8L3BsdXM6TGljZW5zb3JVUkw+PC9yZGY6bGk+PC9yZGY6U2VxPjwvcGx1czpMaWNlbnNvcj4KCQk8L3JkZjpEZXNjcmlwdGlvbj4KCTwvcmRmOlJERj4KPC94OnhtcG1ldGE+Cjw/eHBhY2tldCBlbmQ9InciPz4K/+0AnlBob3Rvc2hvcCAzLjAAOEJJTQQEAAAAAACBHAJQAAd3cmFuZ2VsHAJ4AEdTaWJlcmlhbiB0aWdlciAoUGFudGhlcmEgdGlncmlzIGFsdGFpY2EpLCBhbHNvIGtub3duIGFzIHRoZSBBbXVyIHRpZ2VyLhwCdAAHd3JhbmdlbBwCbgAYR2V0dHkgSW1hZ2VzL2lTdG9ja3Bob3RvAP/bAEMACgcHCAcGCggICAsKCgsOGBAODQ0OHRUWERgjHyUkIh8iISYrNy8mKTQpISIwQTE0OTs+Pj4lLkRJQzxINz0+O//bAEMBCgsLDg0OHBAQHDsoIig7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7O//CABEIAZgCZAMBEQACEQEDEQH/xAAaAAADAQEBAQAAAAAAAAAAAAACAwQBBQAG/8QAGQEBAQEBAQEAAAAAAAAAAAAAAAECAwQF/9oADAMBAAIQAxAAAAH5rncsdFHOOxPZnoC1NS9Sd3EONH5lWWwFNk9SN6MLGX4lOYu3nddL1ehyzZzit2LrQ1SVKdHjzC32IzpZskq2TOumamXKoEZbsDityPyl+uB3K5N3TzRxH2Hxc/vqe1VDoRXyntzMgtsuU5Ttuw5/otfGdTyZCpPbvRuVXMeZjImSqsm6VG7iNkKG5rhdoaMk8mKYecnlorVTqvzKcwpUaqNt6a20c5HGW+aH0g1L0Yp4O1ouhdyuTT3Gl2IbqkzhM7TEyNlXqqhu3ob5kPpsuqOjYszn2Qkm9dC5VEN27Kfbu/Py3EXpP7NEMyfluQWLswCBpeqvQkdD8nZK1Zt1uc4ZaannNGMjU+tYbT8wVyE9LnTR1uYmT3ONzA3FWjDMU/RowJgETL49KjpehJmJlmYbx0FM6Zo6TKlmy5s5k9LP0MkMl3oTSqyjywsJ+t8dXxyLveN7r1uIwrSzNgbMzMACjNJOmgp+ZTg6QFXpshU2BtFHZBSK8auU3MdmbsrpteteQEyQ+MyxNZlTrQdaUbMLArwGTOkPndBT3C5mjo30xkywlu5
FVm0ZUbygk1pNpyCVZVeaHHrDwXuy9hZVa16w4LMGig8xFJ1WplJtAOChkHGUdmb07W3YsswMwFqqdiHC9UR8lNg9NJ1sLBk8HnSsZUxstnbSwYPz4T0Z0aesPUzlr2Wbe89GA7PaVJlRZ0jpcL0ySK6TQqeZlNwt4T1nuh8Bue8+m84Xp0cFmHE+mpsTbHq6JNjYyAsNWSavtK+26wFXy6T45qcgDac2rQWfZm6yW6Or63M23HW7eF6nJzlOMVddeyRRePId4HRtM1n1Dz0HO51Z5ydvWaGMuk2glem+aTdg87D3vg48aeptdKY3LfJZ+qLrrv8ALI2jkEqNGzIUrZqgJo4FckAZTAFdqdLp0wcRlHHqXPNGqnZOdRaT2XRZkGsJuQV2eg9eYWXV6Tn5w1RzJN33OZie7QkHb1k/m0y3OqPFfYUngsX2Lmm1d0wmWfWlRPVaeqPOh0CtjqzLOcHrU4Dq9bhJ7RoVyG5ynVHUdAgmwq0a00aaj1u69IbpaHF3PdfHrye3OnOoN46HLpyN56vPc3Xm3NXXT5b53bkrpi/U8tUkni4j1uJD22ck/Ob0m6ZrPqTw1q51SZtuciIzrDpdJPmgZi17wvVmxZtVelrOxFrQwVnUR0zDnS+dCutCgKZpuG5gUaPgoULXLfagUcPkMyg6bzfTZasaoxrkWdaV/blw5Xce0+s5Yuzr7xZy6caXo2I3lOs1U2Zl8/LInso53wpYvRG7m2DoOdJzpdbVEzLNLxbtZCpFZlubTYO5PbPKrSiGXMrWmJtvcZ5Mu8qeZTpsjKYh5vo8mjMzFCgUKHpMjYbDLDscvK69u1y7KETUXTn93vnLqSc+nE4dwsYUx9D6fM3U+f4dYJutMsi68w48l8YzmX2mYr9UcPdIaFGaK6VG9SHU1n0TSzYt2pzs3MqtnooVoyoc1VtPNvTKlYnjTGggsL/NC7x0lUmi5RrNNhKtyZEwjrGalPNNb631j5Ksh12J3msCh6c+rrKZe1c/NeX1CErk+j9Xl5K9Q+Mxvr8ulNkW+VeuPL4JsXLnLbqXDQcQaHVn5nehLu3JmTIyknKzrdOhY9IVyyOa9KeY1fdMrCPVkqxS15mHX45dmO0WqVUgm26NyPEyh6FhcRbZ0IViXcQ9O/L7a6ONlKHo4OuelKedfJ8u1vPbFi3j6XtwdvHDpfPpdy6c/eehjCNcSxnn8rJ2ZVWQZJ7xuW+bWbiw93esSXYWIWnPOPd9LOXyHtybqmyHOszWIVL1WMhb6TROdaU3PrOrwbDICPVuouULaOMZ0DiL3d0WZwPQupFr8mzpw/V0cS6nQ576XTnJVyUc+nznLrXnTlg1ntdeLO3JKozqLNSV510ufJLlBKnTctoY9ubRca+5kzp/SWayuhWfN1I91kVSJrm63iW6ks1JLYy6JdU0E2E2rWnD3SMk6nnhjaGFQ3UFTF5p5jUXoYGyuQudb2lHQc0F9PA6569zyVtxvdYpskzotSLh3tzqmOVvNnbiclG5z87ugd84pv6vneM5T8+SskkmqvpdsflmbsMHl+s8u0NxOLmgr3M5WnK3qFskdQi5aGStAxMgSTWqcF6jQ07XnyA+EZrNQICWvQKEYZBIyyTF9wre8q2Zes++3G64+k1jjTXXxrk8+jdTsWfF2X8erVFNOb25/ZM8Wa9nVvbj09Y5ud9nN43PmfPnz+d5utZqbkvvPavWzmXnqfFf2maLTLPUvNOOvM5XNuoda9LfMRNLtpmWywb0TOiK2XUGnhydLz5cBwvgpGdU+a/Yz1qxaOSrU8qORtnuWq/V2Z2vB1Ovcc5foZflM7u4d8l5esUzVUoJRLzd56OdM1lffh2N84K5mdfTZ0POfPefj6JOldYEsXRN0tucDm+5kejWpdYKDAy6e5Wu5l1Q6WaXpZxzptWqySrm9LzfQqmefrbZGsnDLdjpefC+NKPQ3IvRFYrIf1jyLBnQvFLWX5uGwvjro+/rV2z86vds+Ql+4Tn51Ny7TY3EjlqjRyzanod052duDtRZxJfqYozflvHjn4y7tDs5vTWZpZDZuaeIHW87vq1LLPTKc2fNeMijobXPUsZRjS91snkXaqqcyPevajczQ1Lmu45DB0FTMk07rHYjLfZS07LclwGBdj7adWj19at55ddCT4lq0uD59F8e0VhRZK0eSamyl15S9+NCWlpWNzebxny/mw3rEaSar+SXevWGbzL6Jt6KrbllBmJ56GD0PEHdu2VXOlbg+ZRu4i7U24MH5yKhbTiU887l7nR6R9ewZ0hcmDdMs9yo5MNwb6ZZ2V76M1sdOdY9FSieWfnufj3GwEqlYFU+slnVHXlZ248VfqGZbZTr5sfGfLebDLJetUoYI7V9mx7k9QaorRqPsVpLKObUzmXs0kbus5TKR0s+p5VWz25YyGR5DDSrjDzV0XNuxc29TMwUPQoZBYe5MKOsZzdP27f06RVLqco+jJ4lK5rh8+nWzeH159bn1ZvmvO09OR8eze/ENY7pza40qpfoksxPk+PNOXs63lJu1l6Wuqs5htA9tmaR6lK3MEzopxM5N6Ojc8+am1a8mJzumijUl1Rr0rJPVZI7lNjNC5i0DNKvSM5zdjsbzpZm1RYdDh0PRvod9x28yzjW1J1QRcsOdU5vV68/mZu7j6J+3m79zxufSveH6zOfPS0V0JfoJK0+T45jzkuOpxHQOxy7TNZkUrRjy+zMjNUNPGWUZPROK/obIVk8vsF7rAKh1V2nkvS3MbyejaNBXIZBYbMlD+kOQuZelVK4jS/vet6+qFl1OOLO/HFp8qCs6ljLPmZr6m5hl5cu6lJRGHrOiNiHL5nllXOU4pWS6oWq3JVt1KrmPO0wWCdVHRRiBp6XEPVyt5DgNHs5KOb42PVJ2BXl0qzk5sswdVucjijswZkOclge806mc6vm90jOajcr06Hq7t1OQX2UBywEtc0pOrZzzir9Okiwnak6VeNBFjY5eXzHFkz1uOV5cr0axQ0LJOjK6u8xc7NaKq0xGYBqq0y0pK+ajDELUitNWYmarkEltzZ8glU6EjcwLU2hYUu4nmapEK7Wd5zJdxMhup0bd9nW+yGrUqjypEnz1cte4ilcj6gEx3Tq2ETAAy1S8zO042MInPeWeb0ysfdLE7npZ7LOMXuDU26IxMlGvRpVkWI4ezzN6VbdzhWe1ZtViO4M2X0X56CajrZ9AkXKFlGM24jek5c0EjMxgPONOp6Kvt0kEFdnQFxNUcJXoWdxBPG1KfPSkvUuTBlQdHOsztedRmRtbDVARmszU0wekmucLEl5tsxSsltUrYWbFGWFvLKulRtbk3ckxqHZw7lCsTvXdu0Zm2no8fWHomzBzH0zaKVsLkHEZrNN3T118gvcuLBRMTL00eNsMeLMJzkrgmJ1ss6R0Mbi4991lZFYUuo3OkZqa0szahxzd5ZqLvKPXnUKWBU3TIt5QNOpwzNsrqOXM69ZRUCy6HBjgU+j31wYHYmVY5AXwwNCMNBGGEfPrbcydMcmzoJ0QDTDLJ1A0ckkqAVQKElx9NHL4+imCp
AFk1MzV5vJ1mqWTWZN5ozfoefSjN5PXHek4O+atcMiBqa1kdHjCsb55z+2k9lPJ4C2jcTouBmikTu0p3r0InBGjBqYoC5MSq0VElGo9ZuPa9LuvLianPs7qVEIIFej1ZHqXDCSli5WH1CT8+vP59PS12YTahkhzSdOlz21V6zBrI13OXRWst1mo4txLriI6ufiuyzpGZiLQV8gxq06g1gfMlR6Mj6nXQUYJWhGV6EqgUEeRyknlIyI+XboXNusH0x8wpWd5GkVIK7MFxi0IglWYml+rQs653LrJnS7aYbYInUxJT5/ebuXS6V8T7zLrLc66EvK64+pw5t1kzyNcqbgk5vPWSZsq0j2WJRuvRIrFpwCJugsvre2yPGGKAgaJEJQSjFvQg1k49qGZt57OstuVV8udbU6IxELqaYrUmqaFLxJr7O5PNlxuXHTmzVWaobY2xNLs8kcvD3jp43dKjWeP0x0Oe+1jXH68/ozmzV+aR8pvgLJWKlbJ7U9KmF1XqhEtruYK8FmN5zvejo+3xowJFLqEAs5FM0yjq2p7HRnPVW8e6Yls7CbXBWSzrJeGYCvkmqOHrws6+hsvRM1NLx+XSnn0Bcsm3no5JrResqlljidefVxvoxwOvN819Hz17pzh07EZy6T51PXz3TzixZRZu2IoRyRSjbmTUG3auZHLta6vtSNMPCAStFqRiKGmrdBoym3Pzc12tYsVJ8gvfsrR4wBVEI5OHNdKO5qBHOWPG+jjUnLqlV2Q7z0c00k0skn1JZXx891x1Ak4up9jm2axzq6lg8+kPPpz1Ynz+/NRYmVWTQanqhESlGUym1ovN+l3vxoYJgZppgkMw6FlSYcuajTu3L65J0rHnzU1bc3KxDrBUqyc5Uv0CX1w5efL9Qis6nx05/Lpz6HebcVtgi9LGU16X1z850x2Dgan1MXazNXRQM6lxviZ3iuY+c1yGKJMj2rlyUimjkVqmeUoozPo+nTxgJpoIRDL6mRSnQr2NLKNZbrHz7XNj6a5trnHTsWvzJ9MmKyzIACuWSZ19Xc8OWSa+puWiZeby68vOuVt1eemy7Zli7LB2sosMo3z+dX1hHY1lSuSyVsfJc+yl+kxfj98JtNzMHs+EK+QdFyio1iZH1t67Y0SuwVnjREpD0Wr8bnzoITXU3yq68+JLw1+1uH1KdCuBNYz2Fyw5csiOfL2iGEr9CjAM2bO48bRLudxLWg2T6zbEe50Lnrbx87NOS/eOAQr3rkzmr9NAHy2Oj+PWbU1nlb89GZE0u31eqvOXSTqMrUHdyOzvdss6tQzx4IMwwMTw7LqSUTqb59DryDWeHNKT6ewQqE+Xzr6S5RLZU9nKlpOic+Xt2MzRln59JsbXm5KzUhXTyZvNAnUy56G8dW5+WmmWUXPzrXVudHL3E5APHtzM7QdiOLeXO1yXCd7CunnJZyiUZXJ7VboRdejjxhcaYeMBHGZsfHspUjEq1mvWen34Jrgy9RKqliiuPnVespluThKcvaudWjNTjaMbOyHn0CXZWjbk9TnzVCHvHtTo3MGg3Pa1lqfPNVpxaI07KUnHXrYvw80M32eXS1Pl9+ZCZrXqtziO32bsg2usGmWdudSG2imhGjhqKVMNmoePUJVjw0Fat46vo8/O1I46dLldYs50rC9PnZv6XWKc1edS43Pz6LUhOb4aEMs0SP1HbxPrPb1iegs4i/T3Inzi9FIBxVA1xJrpp8/wAu1HPYHRl5PTjDvHpAjDbdTYNPUGVMvU6bYVgngDxh48LFy5w7HKAY8mWpLN82+rz/AC8vTseaYKlXL1JV8es+Nrso1nJVzQS6DGGm2CGaV7z0uvLkWXWdGz1nz6rl+hZ481HK6mJlnCOosJdw6/M9M5Lfjd6R3ly9xkiawZLqYalMp8r2e+ngixggWr0iJpRpCdDnunl1wcMJ5X6nZ68W9eXxds6X2NXUCXujOXTm8e0ikm05FS6pHj0ZYoAaMq/WL+/B1c6jTpkR8hN3Zo5tFhWPCs5S1JWcWWvl1jzo66G+dWsyS8eF7zcMUESJhKLj6O6AwA8aOOanH1LM69LNZXHW8/fF9Br6VydDtxt9HD5bO+aDqPimW+zpxzPN6Y1WlB4Olx408OBFgADtS1Op6fOyznVy178lR89nXJzspVWOGJVSUWdesiPG+WFrN9OTwRMs8rgwwh1igomEIuXAbOlLzrEGiZV007nn7hKuXB8rynWC9fllr5zOqVOFHUTOHeaUVXZfLqCZSk1SjbFQqsCA1OxZ0/R59lWDK0Oz5XOopSUzTQ0osgkvW61qc8Xm+sFTy1GasYYwULDiiopSsaNJzyACvUh5LUAo6vm7g0uV8ttzOP1n3o4fOayE1dKywI7Oann0VLqyZu1Ses1EWSakVgSnDlYEiq7VlW8FQwoOyYhJllEpWOlw9Z43Km28GzixdKusxGQrpTBJ0uorIpWSgErEeTko4dFamKqdJDqefsrO2S6dPrywZrLemPgbNXoRQtVx1pZefRKzc9qVhiPsXZL1516y+vnM6eX2NlUkiuhyZWEtnhoUYopMvrHGx4aYKlNAryvCJD2MnddLjyi7bm3Rlyzw3NTTRpSYc4MtlYUpFU4K9Lh19nWygW9OdFlO+bumPiWppWDI61zbZPjpPy67msrc2SwrIOvNW8dqq0gXkS3SC10rnnmCVwFCChhYKUE6G5LKnJBo1VWHKw8ANJzrkccmK+HMYD1dCADJUrVQ8pBWVDNiwypqJAlq57bjbDZWazZ05s6c5tT5jG5xkNLNTqmcugZ0QwjzppPrPznbk5Xx0rKTlL1IfZwJdsaWgSpFytBsIWKq2xyc/N0yq9ZVLPnVBoseGkCvI8lFkn0mpxreardZmh81gcr6WmGiilAXK9ICujq42UpgWHrLevPnnBzRUlFLk+gsnzp+dADmjovN5XTEVjCgWtCMNh1QFNbAL6AEDZao9YnUWrkQFKKZVeoInFFXCzA0wAUrM3qah8MK7Xi2uorkc7EdKyxCavXskWOPIYJzqpj6SweXV+8AN3hNfOSzRspqFlMvZuabAl3OhpicuWTUOXyYEZWASssEOxctUricBJwaYUAwlXBgoKrsLU3GhHizATTQRA+OwnN8+I/Tuzz4V0u9NgYnjwS9BCXkGmnpYdZplbc/TQuhsafOVys2gYuS7Y+EV27lw0kqHKFq0plUiwQh9bKixgtPSjVAgXBKuzQooF06CWdDXADDx6zD0pig0BZjYEp5Zb5816yOEnv7YCnjVYTJ2F54J4ms2UUXZ0kcBby4mp8rYdUqVTXgETVAqwkTNVLZCDaJFQ9SsJVIkceFmhKMmqZqCpJoBhVXOCjKplnRwkYqAAhxMYJzPadXw833I5sXv7aToYakeTVMxEqiymVIo1BoTwQY2XQEaeVVjIWFaSEMlMw8nqnHGjRZZLFWQmx6xoARcvOQzTw1WpsKtenQljTnV9EvEmV22ywJtowoIyR1KxOzw5vxP//EACoQAAICAgICAgEFAQEBAQEAAAECAAMREgQhEBMiMUEFFCAjMkIzJDRE/
9oACAEBAAEFAvAECzSazWEQiMPGJiYiiCZmZmFvAHgQRnljZiDMRZ+GjTWYxGHy+0yPCnBezJ2i9zHaiHqE9/n4wzQxYfBHn8BptiZzGJ2f7hiuVgInXgPh4Zt83s+E467NMTWawCAfxMImsxMTWBZrD5x5xMTMMIzEGIBCYWhg8MIrYCjwT14r+z4znxmZ8e47AzMD5hMEz41OcwGEBo481jLdToT/AECIh2RzgHqV/JZSmtcLBfI/hn+BHgQQCYhEx2Oo0wfIEMMEzNp9wiEeBDGHSscZj+DEHX8CfHcb/YhG0xj+CiDEcAjXs4EwDGqBmmIFxPqO+YCQNsyj7071BBq1YUhrJbdoGJY+czPk+SIZmBorzeE+R/DMzC38AYO4RCvn7gGPB8fnyYcTA8GAZEEP8FOPH3CcRiTN5mD6YhYWz/Clu5kzMq+7b9SuhgqyMeAJiGDw38GWaTX+Xey1WZNXxZCszDAPJmYjfwPnHRPgDszHgx/OPBg8mbTaKw1Now7Qv0iEzEzLP9eMQSv/AFM+E+II7WsA9QCYgEIh8/kDMPUz/Efw46QkRvoNtHrxMQxFZ5oBD65rCjLNoD5A2KUgQBQpqV5ZT6ig+Z+z5I8AZ8nznE2yfH3BHEFOTriY6x3f4X7Mr+/zkyjt8eXs62doFh8M0zBGMxBGPnaZ8AwmZg+q+qa2yzVd9I4IM/bbT9piPTcSaWQM1an90sS7WAM6ni5n7awRai0GEjWYK2HKsTOSm1dCQjtl/hj+B+1j/GExV68HwTFadYaa6x1NjMmsr1YNWpmhmMQxJVkKxgIw/QXk6jMYzMMA8HwBGSazWEQLCsUTEVMzHVag8U/GC5oW2CL7K9/UG5N911lwSocXk3ROJWgt0Fvqp5Fb8G1J+5aLzLUYMtksQ7KtcWyusFstd3xVJAI7b6I7li6wfXkxfu3vwPowmZij4soycRD3nI+jn5ue1bU5zD2DOp1KW2DDr6hbQe4TaHwO/wCAEAh8GYn1GMUQDwe5h0hbaet2lVeI7hI1jMaeL7F5jV8RbW/ULJTTdWGodQvEOHXk8Y0cpyb+GqxS1NwYWjRRPSQ1a932rHfZtumg0M0lgGB1MTHhh0v2x7RczBjeErGGth2JE7z+b2xO433K/qH7CwrEZlZD7K7HBbXcftgZ34CQLiYms1mPH3CJibT7hWAwTPgdTuvloSYxwQMxf/r5Faai7k8eo8q2yxQzKmbHPyUqLS3vWvi12VcivnUGptgIG2j9RSGPpYFTqS0z2xlZbGweaT6mIUmJrifbKgVcSxYqZjfFfFC5axNZ9xhs/Us+8RRgQeMToQ2sibbLU0BBAWBYBNYYTNpmE9qIR0ZiAT8Y7ggMEKfNmVVRTbZyg7Nx3Ti2vyL/ANpbQlS0VJx+OX6rGtGuRRcUPIVeTxuLxmUcuwKKG9dmjVz7AUo69DkV4cff1Af7MYRQFHvXGQ4D4jvNp911odiRAJ6yYtODyV1WAbFBrC/WQWYQkweFP8Mz8uMzUiDONYBB1PoZjGHzp32PGOsRhB9BcwrMRRFEtsVVFzXWpyNbOU/qqX9Pe3iH9NvddCZzgX4uDhegYO3/AEwkcVVZKbv07nOP29nCbkOtSryfU/3DcuxhXBdu6WxfZ8WGTPyMiZzCMLjrRmGL1BpcyqqbATbM5f8AnEpr1AHd4HqT7jrq2RMZgBWBsTEPkmLlpnrznMzB3NIRNewceD4EPgeNewuJ9DlPk/p6Eta23L5mzvx77l4w5lt8q5pA5SualzFWGOcTghhU/P8AhV+qEWfqdzWrzWJoty3G4DtH46Bj3PZgu+kFk/cFp7IbKoLKjP6pgRj3T/poX0UOGVjAZdZmVVdYljaD2tFMHYv8L9Rx35Y4g7n+VrPxg8KI+BBNvGIRB4xAIQISIvhFhhAnMAz+mV/08VPbzeajTh8g8e0UWcJ7uTmvg8/Sr/qo9S37v5mP0+rk8bWmr3cmzkX8m/8AUa//AJV4xt/Sv09sPkY5JOBUBLe4XCr/ANNkN9ytSW1OVX5K7LBaM5XDbEq3R7gJEVNrQuARLTm0/YikpLyDE7iw9RiNfxGOB9xRqGMT68fnMY5g+mmYPrxnreDslMxhiCCA48M05aT9PA/afpw1uu2PHRxya0u5VEr5jFXq1t1AgzgFsskdT7GvrSi22ywPYqUWqR+kcHDfp1VSLcJbxv8A6Md2V4jfeMHXEPUq5Gj9Q9eAQB7pkYxMTERtWDhhvmW/F7UwasZcBqsLnXuKnsliFDtgCOfkuITmYif5mTB9ZzPqZhXr8wRoT0IDiIcxq8wL3jwANVrE/UDP0sH9r+mkfuwihbaTxOWdM7dMM2AZgEA7xmWph63yoZSy18W3l/qP/wCDi2Ov6TRzS71HM5C4DXvs9my5JlagCz+wtW00+Rby9ndJG+gzD4yixSuW6lnYX5DGtnQlnTwHqk/O9hsftuk/OIB4TuYg+gYhxPs4m3UxFQwgYWaeA8F3WYTEGWgwBzqtmQericO79tykvrdOYtlp9QV2OYBiLPwsx1YO6xiZFg4VBRud/dP0o5Xk8UUc2pdUKh15Faq+SJWAx5JA8Bjljn+Dqun1K7iYrw/ZhgT4pbib1mBEaWqcg5XoxVyxEU4L9tNhpj+CsFmheCfUWbd58AwDE2h7C9EmYyPC+KUzCPkoxOb8VqT+pkxy34F6T9PpsqG78jjvjDwQDqCXjUTPqPK5lts74nE/SkKUcij3OnQxOUqWBwAeP93nNhnxVGb+DMdcRAQ0BzD4+4kbuLY1R94jmp4yFTR/txiCHw0Bi9mZgGW2mYYDPyHhMGIzQNN+lEMBMEx1Wk0ErAA1hnOH9fDf+nmcIcicK/I5tz1BuOmftmozApiiCfi8bKv/AJooMqSmipqn/Ub6axTVyOQtQXmBhTaLU5dauHGDWRXLO21hhgHXjbEPZidz4IejLeorYifbGBQ09eYEVwOOkZGWNkzGP4qcHbJ+ongT8ZgHjMEHjGQHxN5nPjuJkDJzWcxs4+Us7At9VvHt9q8rhi2U7tbybhVVxdiOMpZ1duOyNkbIIbOqHPJ5n2eE7Ohou5DBRUj8yWe6yJTYs4anPIB1bDl8wDCax1hnH1azk16ND5B+TtkwnIMU6nIILfGpwk2qYW9MdwEbKum4zgjsmsY0MxjwTFbDQNM5n5BhImZnpTB4Az4HUyIuMZEH3UwmJacBFN05NIVuJyErP7pg4VbHc8dLDxvfZSvL4913Gp5qPXZ+n2e8yvj288+n9nQOHyLJxTx8E/tk3a9bOM4NdDrFrIalVAcjHIOrscH7jqRMkzUYStbC25gr/rMP1BNSYZ/z/mbTdhAS0+OuoMVRhLAI1azdljKtkVdZmNgBj/BT8ceRD9kz78ZmYpxM5gh7PQWAzj/6zmWruDgHkOual5d8r4iIL3FHIyBH4ykq36lK/wB0DatdyVcRm5i6olh5sWvmkqOOFSu4ROKDP2qiV8XR9QYuBORYK5dZhv8ASpW0sOB7EeNhoa2ncovFcapLRqVBEEd58jFyIYFm0G2DYcLn
HcGSa9ozzeZJ8EZjKVmMwTHcOYDPzG858gwQ+BM+KjPYNj/n412eoQeLyyz0sKjersvJ46sCjJf+oBZTyrqrq+Ul62XVAh6Jxylk1n+fHRmxBOCLiwlyMDX9Vt1Y3WcHaF2E7xiJaaz7EsVsGbYhPW5gOZmYaHxiBYoxCJWTh/tWzNcRcE4hSdCLWSdNfGMwVzQRlmkCzHhfsrkQGFpnJPmnoKNb2fMwBBckH1nrMY5l9gIqUs/qLwcewn1EhKNxyKGQ0LgU2gwMDNsFn1jW4CWbkSxhDsoX0Wx6zUVqFktrKOlZMXh2mWVNWdHM9DmHZCmXUoGnr6ZSJ9BVLTUzM6nRiZEziKQZ1LP9Cen4/BSxnuxF1fwZiaY8CY6x2648EStRNlhpV0ZShgbyk4/b3A1P+5AJBYZ/boLiAOQrF+Vmcix8GhmTicMA6AAVhZiBe+VQtqamuAssHJxH5K4V8wbb1poSTLBvA4ra6vaUcj2S2kiV27y+lFVUtaHimFLEimwRgcCqt5bxnrhDmaNjXMcawwyvuOkrJ2K2CVsdz0WUMET5VjD2U4jfdvTVZwDiHYhU2BXMFRnqgrIArE0XGiY9KsGoE9GBWrCPSHH7ODigR+PP27Ra5XhXvr9tbhqzxwFRUy7L0aGMeghFrBmsBxBbDYMFxGuABtzLGDRB/W+EmxBqcLK6u3/yIfjMJauXoZkW6Je9ZddpuFX96MjniDlVxbQZ1L6BOM4cW0awdx1zGHRjSr70zKqirE5DBYMs1mCFLKWPyW5sk/Nl72MBi9T9wIuAIOpsuGcYFsJ6yYWm3xU5BMzOsCfgDp+nrORyuMHFm3srs3f/AHHOAdWnSr/ztm1mi2ZhbKs2AHLDEZ9ZhnDHC8Tiu5xqrZNobJxChzsLI1TVwWK4Yax1L2egRuPGBBqv9cq5KupbIfaq0H2JyEKOtqWRhHEMUYlLAzcFicR3yQ5U5zGaZ2gysKiGAFZ+XszAmQFn1LNmibYHc9UxgNkKtkuBIocsuxmcw5xuYLmgOJn5p9LOVVoaiQtbjVk2GrLBGbIRcTSaT8FDYorAFhClaZbZpOJxfawUKtj4X8Z+fhwDFOZdTpGtYCuzLbdRqEMbjgRH9bVvkXLsvBeWoQ1vEhcqAfcz8dq6sT6ijtmJ8r/iDvzrNCITqx1cAsox8FEK5mrCesEKgWHoF4Zt1kzIsbSa6wUrn0jZ8RxiV/IARlDryKjTfQ+JW2VxmPVAvQWYhWLXma4lmFRacy+/1njcY8i1K1VWn+o5wVB1R2K/fgriexkN/HDKwatkfMUz8WWaGzDpxnlgytZ1vb5VUt/ZfSm9lT8S2q92NlPvZ/gQYevBWDIh7iIcxnCzLmCzpdXhGh9TGA5mPGfBzN2i9i5S6K2Jka7B4thUjGMT6WBNlr/otHiypbFGaLw2IvcxGrmsCzSYwGMQe5uTaFqppblWIiov1CDZLG0md4sb4lD4DQ/1T/xnIrDgZpauwED6tUlFdq3VdWVdhbWan41i21hAvItXavUW0XUvxnS51Odzr2VLT1CECGfUqPxss6PcB1JHYJWbd+2AwZE2DT6gBmJrmCuYhC2Qqs1s9hBaf5mPio2gU5Qd30e2viXZ882g54bbpW3Y8Y82NgduwxVWw9l3Fo9STBY33LSNXvZVwIw+PGPxjiVsLB/4s49ZtoDhksoai8OMbS8Hbi25FQ6vq2XiH1XXp83HQJFdgW2q6tqXUxVjtpAUeMvaj5WLXNBGQ4XqZG0VC5HFE9GIJ0Ia1yAMdCKY74K5zjE3wVXMOUK2K03rnp+QFawanx9zmVtTZRaLEjrsoP7bl6kFD0PLHExsf8nl2F7OMmZiYnK5a0Cin9wzrCMR+pmU/anw/wAG6sQdTul7KlcXUNU3F5AsHKp2Wj421jrHToPYyb0qNqruq0P9FyjkJ/hhYollosgwILAJnMJJIbokzaf9KcwMVhsebmK+0zscQMIJ8jNTNdSTAcz6O+SKFDLRAqwoDLuPgLznqau1bF+4av2zVWrYs/U6ZxbBYgGIDPxDMSw5WnscVcVTmc31Di8N7jjorHXpv8l8rxO0yPB+qiVLL7AU9oqUMLOPur1Wca1HFi3Kar+M21f4Ujf/AJ4465q61f8A8yHM/UeJlNDqIuAvqhXWYGZ9nWfkQmAw4gTB1GMawYgdTM4m5zHd1PyYrW24GCuDHsnsMDidMOXxjlLXonH5S3L9xx6WVthcgevjs3HvzkfmNBDHbLBWscdDm84Vni8BnsC4EdlEturEflpjWx2pArXEH0ploxEYOhXYXVsJTyVaW0rauDx7OXV7av06zKsZX8jbZqOKMV8lQ1Fg1q4/ysexdbxo8wTACIW6GDHgXo+MfHYz8ZjEqtdimMelwYRGU4VSC2TDsZqc4ySyh9sA9wsRGsLCr/P+hyKVcV3Px3pt9iMMir+o/Y/UKdk4V+zv1A+Yxh+84b/0sqqWuczkd8bhdjwRGpqlnHoaftahGQEFS0x0uDD8LdAVo/rsR9LMAjlcY1Ti8kGXVLamRVYg/b8x/kq/5O5vqGF5Da1WfKitPSj3bl6menOrtjBOIfqK2GrAywE16+pqrAoAuiyrlLBh568EKM4x4Vvlh8qrCHORmYn3Pwe5jY1t8U7lvHFk5NXsnBsal1McZCPsrgNO+NyG7Ss9/l3wOTbrXwkxL7fVVw6TFXE2AhtENsa6ezM3nUvSIcwDDWjqv5LyK2wye2vh37KVDDlcM1tw+XmcvjrarVsy8d967XGOJQABOezaUV549zZKaIa762nM45SB8IBDmZ7StcEQzbvHQGT9TM9C2T0aj+xZ7Q7DBmuYRiDOubBBeNtxl2Op+3OfAn+RXZiDtWVXW+oVPW/WZX8Gz1y68kWjSs4Zjhb7PlXR7FrGqvT7j7MAuYt2YzdM2Tk57n4WdPMaP9x1ynF+SlMrWCrtUarKrPYjDI5HEANDCyvkcVqrgpVnr9llY1G05Ngdukqvc+xELypQgzutiaOdZ1FrzYbESGwnxjMLEFLDguYGGNswAbYsWaMZ8Yr9ZbOZ9xpq0t/8swjwOoczMr5JQ7BhzRtx6LcoH6Y/25hwwxhlIDnuOn9lSdYAj2CM2A7kzEz0TCuYIMYzMiWfSDZSOuM2LRHrwbE2Sljx71bYOgYWq1N2wupHYSK0stwvH3tv5t4WuivdndUPuEq5Cmc1P7VRY3rEBhWKFh+w2DZ9bgAD2MOPYYKVsiFgAwaLrBo4s4+xHHWesswDJCyrGv1NvMDID8fH34MrXIXZX5IxRx3xx0tyu203gbMZxFB3AIBGI1pWC7LbwnImcTsgwZmZmbQHMU7CvoAZF9TU2izpTsCJy68jg8mFlVbeQLIvxO2SpjHEtta2xCOPXoeZZbihDvZFRyUoaIuZdxbKpjJXEJ+Xj4wgEa4nFwLf3BhCmYgwkbx8lPuwayXFnL3gMZvHG+SzEX6xmN98d9ZqGjj+uyv1gW4CPNutp9tTTgsQse4zZTBgeB9TOQp6P34zPuKTF+Lr9o0YBh/iJyFUhgRyKRYGD1XW3G0IwWe
xILBA6tCJXx9HNCWtqal5Nv7qLSKqmEqDOdAYDgWUV2S7jsDXwWJXhUCftePDwuPDwKo3Bsn7fkVMa9jtqCUcl1A909uIGDHTu20Fc9j6b6zOK+t+OzFOBt0RmBe6ycbfFj63srraHKxYgyUowSwVXszDNWJxhQe8mfmD/WCZriYmMQ4VoTM9rDLTAP7Kn6yDOcf7T9kwGB8TeV2YPJOIpUOqrNAJYdJ/exzrKymucQZi4MavSLYpAsE2E2EB+PUasWEsojjaerE1eEmKWR/YGXUTRYePbqcwDJH+vycTM+USBorYLclabubxt1AzPWDBWohyh4+z1vMkzXvOCOx/1jvWZme/ZqPbPZ3vmbxT2Tm2v7guQl1R56qZhR45u/vVxAwb+AaXt/VYrYpvM3zNsreuq5DDRNRACsNuk/cIsOLG169Yx67Y3tUBiEVlZTyGMzZNLSBQTDhYLCDrmLWSa1CjMt+S6KR/m0z6n3FgToDEAzLeOttZ5dtUDZK9gzBYioV1ssVOvxr2mYIc5I607J7d222YzYwDMxOhKvlajTp1XVF3WDkIZ7hhuYJdd7RoI6GbMIj7eMGEn1gZrK4CcjtLAxbIiD5KO2HW2fGqZ2Eb7NpDV25jPGAEH0C4tUjRDsykCP8AdhBi5xVguQsCwnUFcpZ/afwYB0sQ7IqTUCKOrEOyyp8Q4M4o+bHIC5jOuCczcTHSMRFIlx0UOzxKo1C2IwNbVLVYz1+qJ3LOLuDS6Cu1Fn7sRuUJ+4ILWs07MCCaloqaqV2jJ2EGT2dTsEUrgKM4hHVZtrY3ElQTNZ6nYWJZU68l6ytzFug12zW3BmXYrN4WE2UTUPPxTPwXKG3JsHbV4wBmAsFPUYlq+sA4jiL9D6pYQ/5A8XdWsBEXI7E41biV7s5Y3ErswWJTg2fWfHLtyKrZXaDMzlV7BM1W3XPZPbiVXowF+s+DAitpoDNRFYZgIEOGhxPizEqqWKjzKqn2MTXB/NIFzqpLFestn1OUVtHL7HkULAwD/uUlhyw7Ur8sLNacbETIx/qDqIbDDj2MmWVMRIGAJb5PAjCORKztTYO1X46zHdfYDTkX+tG+Td7AkPsJx2s21ayHM/P5P23cCKkvYBT9hIG1iWQkEFMSk1vLOLS0KkTAi15OOiuB/mBa2IqWCmf4GGMGymoMWJxFKidAouW0RQ3xh/0uALSHn2lb4j1holnz3ZpYmLGQmceh7rUu+PLqCWGBRmzIZvtPv8jIX68fEtFYPCIxyN2yxAThHakqSijx9T5RbFsP6lpZXnB2y7/X3KL/AFMGBRD7YVxMaRmzKRLv9XHdtDBB8pjSC3pHM2NZt5B9WVWfAT7hafLGWEY6OlqNB9N1CwYKcAPgNdlh9MIhCz7n5J2BbM2yo+sSuzBYj2vtHLaJlpwSvrcnflvtxrCTFO021hX5DGfzkEE+A+K2UWpWcz2Hbfot1Y2z0E0WK6vXgh4zCsDuPUtw5ICkZwemzlU+lUGVMPSh79vZUmBMtbbqt9nrrVMjQqAo2HcGIOierWaDLoVM2+PdkVCJqYRmbATUPCoUqpygh7LK0rTE1GQVEwGUexQLTn6jTszOPDd+cvCZxrvXahU0sBarKc01am0MHCDGezjC9QZMxKtGVBqfkFAn2LDkYn447lbFOwd9YWzE10v5lgc5aKDPvwBgrZ6iORZvxrF1Vkj2KqC0ojcnSMS9tSgm1opEJ6z0wIOheFVsRUfOxnoBLdEO2RZZg7GNttqcYMDYgMz3giVh46HJRhCra9iE5n3Am5QbQrB5UwtlPlsRNdojKC4BAPxyYfH3F7nY8KxNpuUQwDoiEiawdBQZRy9V/f17X8pGjcgvAO/qCHqDZTjIfuKFJC9jkaG3kmwXXm0BclABZsizO0EIzBMGbkQtmMTNoGxO4yFZ9TM9hQ7dhmA2Qz6gt+bMQRyJ7iDVraWuX3C5QEL2F6TWuU3X2h7LdLjbvN58vWALC2EU/KLloSVnbxCGXTIw0Hhu4Dhw2xEFYg/0W2A+ZcEPjIx0QYQVIBwa4UCTE1M+UCgwLtNfljxrCuAEnrE0AOoEA6xibTIj/THpDsTkWfUbJj1q6/5b6hOWEttbPqwtRIgVrDYpyE6rUZUMqqjKaqzfdf8A0ni0m2zlV1reliVK/L2ldb2CoftuPYd7cylVRX2LaPRCwhYSpsS85iYRfaa7KrksOs//xAAnEQACAQQCAgICAwEBAAAAAAAAARECECAwMUASIUFQA1ETYGEiMv/aAAgBAwEBPwHtLOB6Xrm7utEZP+kPvPrPsv6hdhr6iBJHimVUtb/Bni+pH1NLK6Y93VLfB/GeC/Z4saeKpk9Ukkj99WSbrN92l/DH+I8aUeX6IZ6/Z5KzR40ng/gp/G2P16W9YO6xV0PJ9acqbc+kf+T2xUjiRpVHi1wTPJxZ4/OTs7LZGp2RBHUpqulaRtJHkKpvgpVR4s8B+VJ5FLVVmRd+ugtjyYuuvYrtyNyQU1QSe3ycDcD9s4FVdkjeme1HXotBVPCH+rJXS9EEShD5EK0FeuM5+ogppgkn2VOEMm1FM41UxaSlkwSVrFi2qzeK6z1U0tjKR8lXv0NEEFD+BYVv4tBHoq4H7RSVU7HlGawWCu+pSIqJFyVf4ORMn16KV7srMrXzZuRTBUSU2qpvFlnGhYKzER26FZ3VRS5HQfxnj7wgqXs8GKlLkdXonCtZxZa0IeKuxdikbu7Lmb/OLtJVi+osFZiH9CidLY8nqV460dJaKbPOq7wdmLpx1Y1LRTZq0k4fkzfQWbZOiN8ZU6KSkqfoTHS4lEsSbQ3arSsWLa7LBWnoRoWrk4KaoJK65F7ZI7R3YJIOLRujUtvledcixa2RvjBYPStsa4IY7rGJIxe+dCxdozXXgR7R/wAsq/Gj+P8ARDVlQzxZ4M8WRvWMZMjGR2TzXXiCBP4Y0JjVoEiLwOlMdMWkneh4wRlBBBBBBB4jRTT1qf8AR2am3JN/I8kSrtWqo/WxZRaOnTwNdRFJGiDxsqjmztXSR9Khj6aQs5E7wQJxZnxaqn9EaX2laR9JXWfBN2xlIxWRCY6YI+hSgYyekvQtMHBNmSId6bMqpj6BIkZI97uhYLJqT2hOzKWMmyYrRJVTGEapPLoSTflYPaiYKXgtEE2ROCdkNSNd6bRfneji0idmU5yN4PGT4tUsZJ6yRBFk83rm6ESQ2RnI1g8p9FJA/TxjqpYVWnJbkLWmNZK/wIkrplZvGekx6HodoIFSKkgjQx2kepQrVr3JGD6a5ygajfBB4iWucZHkrSJCQ0NYx0qec3RI1Ar/ABohipEhLUxj30okm1XI9UbF6FkypJrSqZEkQrRsqyQ7QQQRZKyJOSGeI0R16Xk1JMD0L2JJb3xoSi0EZqzcHkTZogi0EEHBH60TjOdaxgi1KkSjr065PLKe3UMWKUi6LtBF4F9W3Zsq5IxpuspJJxmzI1TtnrLB8kklWLKbL3kybSThJOM9TxPg8mJz01wK9dmycWcCzqf0niU9OliJu3A8nhIrSNxonNa5J1wc9JCfsbJGOy
xdkzyu2Jj+n/3pzedCwTGxDII7MEbeHapi3snCM39eyY/oS1vn+hPXEkW//8QAKhEAAgIBBAICAgICAwEAAAAAAAECERASICExAzBAQRNRMmEiUCNCYHH/2gAIAQIBAT8BwxssvYs2WX60hIY8IWEhkzVxiDJ22JUW0JieaLaLKs8y4Ko8C+yTos1EqZ4yXijITl43TLsWENGnDIrLXBGHOPNKlmyxse5F+x+lbGXTwu8z6PGuNl0OTbEuCiS4Joi9KrDdC8haY3RZKKkiD0vdqF/exEuHePJK5YUW/gov11lCzQ4WLZ3LaysIl0IZJ1mOXGyUC2j8hd5UaxWJTLFPUuR+So48fi1diSXWyityFlor4CFtkqIjxCNZRexEyOJ94Ss08ZR2OJoEksJbJ8LFCPJ0ePxXyy2h+TNl7FtsvckUP1IW10aRke8voU6IZrHkFif8hIXBY8K/RYyfWFFs0GjVLkbUeyXkvY9q98o7qZWFsbo5kKJRJEOx58ipnj62zI4kuSKrYpofZZY/Ix9HiwxExFZlJRHqmyPi/ZpSy0V6a9ckJnIlRZy/o0vHBbNQ50JXyLZFDLxONkONsuyHY9sihSFxjShi2N5ePx27li17LLL22XtkULhcnMmaooc2yN0JyiKafZVdFWURe3yfkTIXXJaFihbJoguR79KJViLssaFmtzNOpn43+/TZfqTy3ihW2KKKiuyUosUkPyCcZGlEouJ9YTy3R5SLTJ9GttHNcEfLfDFIstYkRzQ9lo7KKysXvaG9PR+Z7LG/RRW9Pgea0oSrEo2UjhdHYkmLhFDj9H9YjiUW3ZIh2SIodDsVpWadSPwf2aGumKX7FsZWHhb3zuR12eRfoaaNSL9Dyiy9rZGf1hshS5YuecN4+xu2WdNDEMkrLwsTQkMiIQ4krSI+V/Y5nLIdZeVhbn6Oi7KxpK23ixliy3QpFlljdkIsqkaeLIK2LMpVtjKxYkrErKNVGoZC/s7KGRq8OJxeGqIsvdQ3tZRW/saoWxnO37xQsMkRFhoUCCJi/iQ45Exyov6JoeyC+8JplqyHYuyZWNIiyxKymWyhsasXG5DeK93e5lieaHtkIQhlER9kuiP9ir6GrEueSXWHhEDSxKh6bIdn2SWOCyxcCE6Hihf2PCZZe5bWIRW18iW289GoW+QhMs7xHDQ4klpYpn5DVazwWQfBrQ5XwiMeSucPEusaaEPgbsXKNOGxCiaSity3ylpFJS2MS2tjuy2hSeGtl7KNI+Bsh0JbXyqz9bViiPWXifA5GoihvCrMli9zOcIWUIZ5Ojxdem8XTGIu0MbYhkRobEx+ShSsfIlciC3Ma/fpSFtaJRKF/EhiRF4d4huZQuC0IeKOi7OyPocyhlk+jSxcYark1WMjwSmNWjX9Yj1iEeRb5d8YW+O+aTEPojiTsgihrCosvZLFjExSzPoj6tOGjSKhi5ePI/oguCiiUbOUiKEjxxNKF6JXhPZQsQ3zVjRLoh0NiI4YxFCIsbwysuVDkzVwXJmlJWRp9e1EuMaeSI44rgRrRLkiTIS4NcnITv0MnRH+Q4/YqNKHSFE6Et7KQ0XRZF2IkLCyst2LDQo84oSEhxp2hN/frRRMQxFjdM13hovHBFrEGvV1zhxsojGhsoXoksSjzwL9MrHOVsckI+x85ootrapUy/SsPDENDQllI+xpUJDs8S9tZr0MvDYsqsLKZK2RgcDw2Vl0X9D6LKNOUxO9tFYZEe+WX2Mg/r5DK5H2S4Iu3wMSKxyLFp5orFrYx0R6JCGsaTSiqO91iKxRQ9jWNJQzwpr494tMdmmMj8bi+BLFiLLxPgTNQnso5w2hcj4whjmk6HIsSOjW/sb6rH2N72MXW+H7+Pdl0xq+URf7JL9CY0ahyLLRwWNfvCRQtjIyJq0cFEkyPGK5GsIY42UWUaSsViihxNJpZRpNI4H4zSLj40v6FhOhqxWhpPouh8mk0Mp5TwtzH2N2SdEMSdCkXZGxiePvFfCl2RfxGSL2WNVhcIs1YcbGqwsLd5P6KdE02yEeMT5FFJYSy8JFep+hiF8NsexYsoazZqGrGRH3i9jFittbV38N8IRQvhPL39jWUhEkI+sPCZeHE5Oc3tbw7XQpWPyR+C3YhfDfI/TZ2VhFDwih57FhixRW+jSfj9jLzJ/RQsL4b2S2d4TotMawiSEVhoeOhZpl7LWHYnxsr4FFfF7JLZLKy8JlYZQstYfe6tlL41Yv4tElhE99CXooo/7YWyxO9lbb9zZZeK+KxlM4Q3exbF6q5JFiwjsrdea9reyPxmWXeF6KF6X2PkS9DL+MhfCsschyLL2rK9jw7KEPCWxiWF7n1usTv3tlmob9KKK91DlQ2LHRZeWIr3y63qTQnfstDkNjfqQhe+bKsrEXuebP/pa9jVj3xbT9LlQ2y3i/ZH02WWy9rXJwhtGr1y6Pwr79sluuirF6JcFt++PfobxZZe6WFGxQK9FFe5rfBi2WXiUqG7+PL1tGlf6GLEPa3Xwliyy8WP0P0OVEZqXRfyUsJEet0svcijTW2sIv4zivsi18Z7I9FFEd0sPjFbEhRzWyiivg1vUVQ7rgU2Rd/DfY8wwkVtWHxvgviv21XQ1wfjZ45f5fDkhlZojuWHihorCV+9svattIr1IUFyRdr4UhrgSGhC9LRpylQ0Lj4t7F7KrE24/5oTUla+LWX6WhIkJfLsv2Loj/AMUq+sT8n0iE1p5fwK/3DF0eWDl0Qm4Mar/xXWPJGpD5/wDFslDUhwcTSf/EAD0QAAIBAgQEBAQEBQMDBQEAAAABEQIhEBIxQQMiUWEgMnGBE0KRoTBAUrEjYsHR4QRQchQzgkNTkqLx8P/aAAgBAQAGPwL/AGKSfy0Y38M+KzLqcJ6f7NH42Xwz+FfwydyR0YLC8f7kvHPgt+BcaJIaJoxtdktz/vs/iSNmVX6jW5dflMtKuc1VjX8hp+UuWX48EE/lXUy5NLg0v0JWmNkc3EpRavC9L8NjNV9S9i6O35SPAof4liZf5TN1IE6bdDemr7GV6dSaHYtTPqZfhW71QjmfC4ZPxm/RERYf8RU1dHY8nD4npUWorpwvZYWXuy3vY7GpP5e3gsR4LvC5NNyKqfylEdMObRkTIqlrg1w6opWrMzqddRnqt/yP1VFTVPLTEibfuZuC8xk4ubh1oivmX6jMiG49C7j3k5azyw94K/QU/lLeKSfDG68Hl/KOqjR+ZEvQsrFy25HTVipixSqOFn4r0seWpLsJviX6Mqbav0LV6idFVUGT/XcB5XpXlM1OnYy4OxO21i7FwaX5vN2RbQicOjNPxZZC8CwnGPDKM2hYUmv5XJm5Xc5pOWyHVX5RcOm1OH8StShf9NSq+5mqqmqrREVRa8HK/wDxF8OHPUXE4tLXVE8OpOkXGSlLVCro8rw5ubszLQOPqR4NcLrwv8LMsJf4ULctqOfy0xr9iN+x2FwaNEX+W7FdLicaq0bIXDjNXVTdsUu1Cux1b1KfToU07vUyt3V0x1v5fMiabpqUU8Th186fMtmKniU/w67T0GnfhvXsWZ3Kp
1ZBmWjIwSxiJLeGWvwblvxLP8vrh8OEiV5mUOiFW3Lkpz8RTTpBRVxfNQ9VuPh0uMx7z/YpT2Jkb2q1Fw6noV/Bp5qm4EnxVxF0nQ4b4t6eJaoVraGVzDM6TuQaErC5bGUX1IeEUs5lKJSZLfjt448Wn5eBvY9ymhaJXG4zU0atu5m4VNWurZHGo5f101ZkZ6HKRGEFh8WpxSfwOHVVOlTshcPjUumpmVUxlucHuUVT7ETKM+5EGWD/ALSZPwqS9FJoX4ZEM8/1M2ZMlU2fjgnF31xXgnwx+ZdUO4/5XIlSr1MfB/1Kiiv5h5aPicGq6yjp4XBqh6zSP/T8W9OXlZYuPCj/AE9GtU5imuuiK0uh/wBZxuThcPSTOqP4bdrbFL/SzS6uiDSosn6snCFhNJJy46izUkjnwLGMc2wmRhPinwW/JTOKjc/1FX6EfEpjSRuyaIor5ekShrjUU3/SRtPgk9VBQuHwaamktSeMpoWlFCsTm5qlZHOr5Dh72HNGQ1M2afBHgkeF7kUlvBJKwRO2GVEMthHgj8zLI2L+x/qKM3m+58LVCfyvoPnv4/QVhUKq/fUU8Wp107NHFjoZuHeqjYeaFJ2M2Fi5mqLNGgvBBfw3UnK8crxt+f8A0o0J/U4XcyLanqKp6aMzqpQU08Kha+foLh0UKVrV+kdSqlaeOBJKelS1RVXxqf4itPUp4FL71ehxbKM2xl+Wq9OENFsUsNReBvfC5fwxEl7E0mhfCMJJ8bkmfydrnVmf5nyrB0VvVmWjmpqK+LxamqKfl6jr49WSit2p7Cpp0XikTPiJcyXNT17ioVDp3tuVcTiz8Xiddh1P5ilvYgkcq5qPHv4Y/C7F7lnlf5ifwO2FKWzIbvSZqfOj4PEf8Sky8KmluvqU/FdfErj2WHfwxVodiWnalxV/Qz06RuZlK4aFQti912JXNl1gmlyTvhL3J/CyseZFhYTjYitXNSVEF/z1l7scuY/cXD6OX6kmfh8tfUy8aOSxK3siuqp3ejIq1S1HwuI99WSorXY1M0wurKY8lKc9yzhKUV8GrzQKmuaeGvlykK0ERPdVIcNVImkl0ulkrXvuW5X0Iw1xSqYo08M438EEVHmIpZvGE0kP81CxphPInJL4lFM+r/YyU1V19qaBr4dX/lVD+h8Sni3esDq4izd3crfAhKLNMUrNSZ01PU05X98MzccJddyODRmq6nxK6oi+Upy01LiVdHdGZ1fVDdPF4bXSSXlvvMfuS6Kl3gnKv7kpQQxzQvUnCVhchWMubQnCcZx1x0L0lsMtSM3DwnR/nP6seZv+yMy4FL2Tr/sZczVH8vLThTRQm6toRHE4b4lT+XzNewqv9LxlTDum9zLloqnqc3woMlaVRVQ+I8lP3MlEUKlHJVwakX4qoW1xrgvPxN6uhD4tdNtVVZnPlqf/AAy/sf8AqU9bk0x608r+hpP74Za1yvfoNarGxzUXIWMPQzUEPw3wnCUyCSVhKZf83CMhJq6q30FZJLtJ/fDkTv8ALw7fVmVummnemnT33Z8OvhZUvmVoR5+M+kvQnRVdTLwuZ9R1Rmzan80TlqGqppj5qSaVxan0b0M7oVNZ/THvi6Ws1LLJx4ZLPGxOEE4X8cYPDLUa/mdbs5rxc7mZtL+b+xZ/Uu8PUgVdKhfKv6ltb3ZldVdTX0KXGXN9h8sX6GV6/Nmeg5rm25PzU6FrPen+2EVf4ZKvT90ZtVv1R1XUjUaiUTS89PQ2VR1RKc9jTDyGhZHlNGaFnfDTw6GuF/CnJbC6Mywv+FchFiH4b6EzroSouZKb/qrPNd6sWbWry09e7OfzMb20RVHsU00fNqU0jqqRprY00NMO/UZmQqvqNVQ6N+3+B07q63//AL1OR5alt1Rm0JpM9NqjSE/sZ+GlPY+HWrman7GXicxmpra7I81X1NSKS6JdKaP4VeWrCEXIZbxS0XLltcIZmpJ28Ek3/F1LnLhl+pmO5luZqv8A9Hxateo6nv8AZHSxTA30eFK8ElyrsM1sTNyU7U//AF/wT0qOXXphKUoaq3Mr0JVqupl4qldTNSznctY3qxmn7GSrUlfgZvB0Z1M9IrD6YaYZpseXCxfwWI/CiS5NKuU8GfLq+plmy+5bQjB4PpSj0LiPU+2NzLFzPUWLxBe/fCaXBkrVyaSKlcs7Y293jbDMtDMnc+JR7rC2MjTwvhKMxZ4Qy2F98JROFsLkY2OZE0MvqvDBceNXG62HXu7IjuQMuOB4sXrg17DqfUynxKiESz+YWN/ZmTia/uTSQ/Bof4NMYJPicIuKlal6JT3WM/hRVpjCZGOng0xzGsGuEkmUzrYTWGUleWLGZY20xQz3IGZ68M1U5SFhdWRHQ1uS9SS+hFXl2fQjiXp2qM1DL+C9Mpk0wZSYLVGtoNdbPuZalarcTfsyG+UtsR4LeHuSdV0KrSi25Jbwu+PLqZalDwsoIZqa45TI/K9MWmh0a3sWdvwVU9is7EJYRFkQRhKJW+GSoiu/Df2JV+GyUdsJGZXoKpaGhnV0WKqPfDJXuRts8JZK8VzlE8NSxJoxKYg1kt4Lo1LGhpclI0LmZE4xuZK9VbH4tPyjVWpHj7EdDsjTDthndkWOgxqcJWqIMtV+Gz+R4SRvhcyVYO0mXQp4qwuKSKvbCTmRZmpcvYiSEQyN8IRdnmL4WcGuFy2ElkXZ2IMrQstRGZFnOK43DXqJp4NDoen7if18V9MKaaTr1eFyF5uh8Xir/ihR4Koxz0l9z4NemxlflwzUkPzIlYLBRKGhFQvUgy1WaNcJku5LPDQknHlZqXPQsakELC5bCCDLAmSzTDlmirqjLxeZdUTS8HxOGpW9JNLwp4q21MvTXxtblXFrJ3qvhlovULi8dyuhCxaGVPwZZL/Uy1edEOzRBm+56kieGuFU9R4UoXGo13xuct1+FYbnUszseng1g0sdC7sTJOGuOalTYUe536YZ6PLuiw0+hXQ9J8Tgt/8AgqaNPmw+HTqfF479i2F6kjzos5Ohl8GekkTWqPi0Wa1MtVmXRFSinYzU6oyvY7klmXKk4fqIXYaY0jQgszmZrDL7FsZLrFT74Wvhd2OV2NDQ0w9CC3g1ZlqGvmWy3Ogng6Ntu2GdbGV6wT4ctOu5ZC4NHmfQXE4t68bsn4dP0L8Kn6HLRgr3O+MdR0MjZ4Zqb0mWp3IZ8Ju3dkbPC5lm3rgxR1MxYzR5cJxvg4fhg0Irs+5ovUSzDWYu7EyebwX1L04Th2xnSoitcy+aD4NWFiS5br4JOUq4n6h1D4tfmqLmnjnBk4KtbGanUyPVEMz8NHw69TT3Im60ZFWq1Ms6kvDla9GXUGTSC9zKZlozK8USWwuSnPgmBKmrIyaqFVG6FsSWwsXghrFY2IvhKIYm/Z41UbTbCmvoIl9cHTsXIKc3kW3UsoL/AGw/sRmInwwSaHpg6Wmj4tFROGfh2fQhmenRmdK+4nGKozX6PBuS+GSumzHTUamp2I8ELwRZdDzNwPoa/UcaltSE0zuSbSaX8MCxzUlXZFD6rBPqsLj6ISe2D/mYrHMyw/7nphDZ
6GbwqoWFVLWEo6GV1SsZTt0OpHQjHPmzUoyJ3Y6noRbCGT1wtqWw1vjOHKpfY8pVz3IqrsXi3Q1cHKedi3gf8SS9LvuiJLSRD8PfwVehr5ahd8YGX3Jr16FkvoamsEyLoMsThbwtY/Fo03E9sXFP01MsyZqnCIy27nI4NIxyoknYtohuJIg8x8Pi3XUlc1GFvBzIUV+2Gscr0OVQhP6GmGeEWZZEVZrdCVSRTZeB09MZwlamlseLEZanNIlh3ZYpXe5nev7YamhoW0w0xla+OMcrIw7kZYZSnsXR5EaYdUVfuc3F07HLddhcPhmXCxdkSTEM5LnPWl2VyZqfuWo/+zNKl7nLXWn3Ip4lD9TOqJ/43Jpqpp/lq2NDQhKxBZU/QiLs0OV+9OE4w97eHU1NZL6D4buti3JUQakInc7+CI8FvDqSWwXg2xVPaX4IwifqKpbnPXUu1Jaqr6jdOrPK5LKmPU5qXP8AMrEf4NH6yeZv2JHVTTm6nmuanpeRll6Qjo+hOXN7EOmks/6iiv6kzKP7CqenqSoaKm1KkhKH2JXDq+ho/HubYKjiOFXoz4lGq6F2XqcFmzr3M1VdUdJxuoJOxvbB4Sa+JUiwy502S/seX6llBZlTqUKrR4W8PuJrQy1E7HNcz0ttb9UeaaN7G3Q6+pOV/wDyOe3cmRumnUvb3MyZKObQmJgTY4oqnqK+ivJav7F3m9zKj0JpUF2cup37E1JVdzdFPhmC9sKV+nQyU8R2JxjqU09B2NLompZW+8+Ds8JwhprHXCR1YRVoRTSkjU80epm26kUkGxYkuWwc7Gv2J+os+nUy03wqpXWwvm7Db2PTqeVPr2JVLfZs0gm/rBuXgdOp5bMso9ylNrmvY0/uObQrIjMR+24rO2Es1NZwdfyoin6k+DuSyUsJxmTM/bDuQjXDQvsQZiNFhlaMtdLtqeWPQvTxFTtVJHxJXSpHI4jXchq3VF5wtS/VszKPoNVNayXk0NGtiy+pMfYh27YWYk/3FLhsh1RKPsN0lqWm10JJy+5mVSXqcrofuTVQ0Rlpf/Imq55fcr0l3PNoXudsZ8prhy3fQ0uPL6n7nRkJo1T2L1aD7kCXgfd+Ox8Sqy6DrqtRsjl/7fXqdlthLwthTRutcL4KpUy9DT0MvxLNaC/hLuf+2PcdVNNM7ol8M5VYvKNmdyXcjMluTOnUS16tGkeiOVZat43JSTgsnYUpH6h2V0NVPTuRl/wRljM4vpJlSU7QL+H7FpjudU9ZOR6bCcaCtVcgzVZ49LExJT26nJrNzySaaGhbYnqPcnsX0Laim6J0k9Tb3Km0KsmMH2GZX5upl3WqGt9sIZ2w5VKF8Xy/pRbb7DlEkjgSgn6E1Fsbl5ZGSn6EU/w639GNOLdMERUxw37Gn1JLVVe5zfUtBmZZXJq9RseedRPv7YXm/cbp06PUbpt2FNMpap7i4aTtcjoZXt0JVu409UdzpOgrbEbUiURFoM1K5atuhHQU0nLLREY3ZoVGmu5JtjOpU9GyqnoxMjD1JptVToZvLUtUxOipZltOMkkbGZaGam1O3c7mZuZwdWyFBC2G1++KwhqH/QTWpEQ6tyId9y1KHHymsmpFSlHK/Y6Gom4mfqKmP8nU6EF63PUcX9PB/wAS1GwpH9yVUiKp9zPvEEq63sZXvsR8y0KnvI+IusCq/nF2646QSSzQthFN/UdS4eSpfczJX7nQuSsJ16luuE+w6tlqW0gv7M80iHi30QqI2MqMlCnuXMn1FTQL9T0FqibVHQhiuql0HZaiwU3nYiMpm+poXXuWR0FozRWITf1E5cDykPcWV69Ty5n+4lV7mljueU5aW/YV7mopWq2N7dT3NIO41fuafcndMv5atSeI7ObD+G7J6FMqGN1eyLUtr0MzfsKCMLbCMrsOemmPcjLfCN4HRsZidnqZKtSEx00eC5Hcm3Rjadi9d3qRT1JZn+aq7NJZnevQvsh7ZjU6dhWj+omjSIMqdy1Eog29FUXS7dSczk0+gtzSX1JeDWb2g0uTEexr9hKl0/8AIjPMHY1Jj1g8vqa2Isjk3w7dcPXCUpFtJG5v9TpsShPqsNFEHMz+pZb6ipnB3iLHWo1LH9z/ADhJrcy1bEV3pZmoc7GpGLexpr9xbCKs7EaSyI5RJWLib0FH3JqMs2PUtEzoQvoTzExfqSM9SKEvcu0uxGb6F0SjrJp7mjLUL1IVja3QiETORk8TK8i3I4dFX1PNEjhWZcipwOjh3Wqgurbpl6dP0jcQS2rM8ysd8IV3g316FtC2MSJbmkDzKMLVdyNurNWzKlzdWVUz5XGMLwJuVPYuiErkRBdwjln18M9+g/XwTBpfDU6WwjTuOmpz6EYTuQzymj/bB0KEZvujt3FbXRyOhU+rL2sRer3LNc2pL1Hmq5KdkVU2J0kooTnqSko7mX6zoOuYkdfzDqeuHNdjppTnSTLVRqf4NTMSXRS1o9UNKz74f//EACcQAQACAgICAQQDAQEBAAAAAAEAESExQVFhcRCBkaGxwdHw4SDx/9oACAEBAAE/IWVLpdDCbR/8HUhB8FEUYMoVi1iNrn4KzHEB8CKKpVAIBUJcz1AipQIUEQYoITcai4I22RgqNYFID4BMGrhbipSqGGyYPxr4yzHEqvjW5Q7og3zB0jyfCmCGzqMI2wMTEoAzeGOIs5gy30ZsBfE9TOSyUvLPA+B8PFKuOIs3DLowQQfAMIJpgXGkIQnwliOWoMcspJZN5pKuYY7OVS8yqHLHUUK2EbI7mEpDuEjBrMa7YHwa2qJqGdSu8tHu7gdy4xsi1DMLKqIOYoShM5n3JT8jDL1PohdFXHf85PcBHMA3eWJHqQ9vxXwNS6i5jaOIqyv/AANpczTMKPxSuEAg5hVcBDUpCjCpLv8AEkLIplN4jJIeIRSi05lyqYxLhcacywlRYGNAJ2/+FVlJqGT4F0IbEzBLKqhCYFsTHiYAyj4Mzmo4LqOgEoSs5OpY80tpHo1B4Z+Jakvg+LzWbmosS8wmKYsPnGksnPwCBiJBpmsYvahKl/8AivB8P/gNfjylYjD41GfiqYQrEmooS9wI/Gg/GkNLFaal4PHcwzgEZ2y8wYkT3qCmmeaXIz6sOtty6lChtmmDZQ9QhiPxFYglEc5hcWWQgrNQ+AgRsLC/E4E8JxL+WZLiYRtRLgfMRMkUjmJn4HxbK0oUbPc2wEkNURZDcFzaB8ZwKmPwau1lkVTFm8bbCWBVOaX0IvdYlCvEbS20WBbHGK4aTBlbocyl3PqWZl0uYVz/AODKASqwjgjmDp8zcCMX/wAT3GebMYD7qUPZh1A6tGLHI/HxFRivrcRd7DNwXcr5JvRV+5lwndfHDFtjHojNFVymCNsDuPLYvEuM29eJeIJBU5hqWZlhLlxr4Zq5cZQZivFKgSkV1KVUNY5zEKKqZQZl2YQr5FmUtcekSNyrXusS+2V1FDMQSL9zahcKvgIfDlBXwWWYbgTbKojGcX41qXuDO+pU7aGOoTJIYlfh3FuPCRMoJt4diKZYXDsg8LfcH8N8/wABMS6+C5fkvQo
fSy5WD1AiCjS8k5k+chF2CzfUZuClpmMATJ2eJdV3ts/WMF4HPDFR4F6lts+HgiVEl/FcUf8Agyfgsm7EqEQELq6nJKnOoWwYKgfyzwiXK3OHLIbwYEqxCDiRw0zD3X3EDEVLKo8JjSpLVKYWhuUS8TeFsd+DGOHwoymZ4yqjWlcRU8D7hzhbM9w6IY2HjxM4AvPMQIOSWw96IqpnCvMfw58oaso5dQAi/BzPoTFzBA6u+khh7f4Ztp0UnYnUvkfkqDLp97IpGKxlx6gAnRaX9ie4IqLQkbOsdzDYfiABTEF8R3/4mn42aSsygYjmWyykwCEpcrMJTS8yvmmNcUI4iMNWlQHmI8kWWeUOkK98EW1PDK7k4l+5MI7ZdRQ4gxFfjrgjiKZwDaZKmOBDEtAlb/hSCLe2IZlrlTIe8DBG1m1RhxESsgcvmCx1PCaxvpVy0E9pgin5mkXechYzxzKdXpX/ACY0tDmFTyGvEojhTfcNxrV6/wDkVxWGRp9ZsEZx5gAhclDGOzHSpFqmU9+yUXwis2IwYJzx8Klkb5YoruJEveZxDcyVEcksumdzoMwEho88TUW4ry38LKFm+IyjUD7SZcW5JWStbngZNofwBZZmXI3+cqTRBFMkxzEXLinM4I63KsMk1sqqelcuFI/cvzI8TWQOXmBGJuyAdTTzaMoSikRbOMQIjgEFFILCUbLZeHF8QdScMB8eGNMyUhbcK0lxxCj6AcTYaPHEKl1jRcTzW4P9Uo7S65qcoF57MS9wwFsmcJolznHACUGqJWnEyhBcymSKxdwBj4YYs6sdy5dLL0VQL9IpNqJyYDXME4l0VHUOV+MpUXmmBXEhmhyQykRle4dalIa+AEpNaXw/aXqZysuUMbotfNzIFnalDzFDbbitN06hYtWYMvq08vUVZXN0BHdDzufvMPz39kv5Yt0bpF2W2zKaepQSbX7E5h1swnNyphjjh6lmrI4ri4Bq7X8kubjhgYX2xH/x6OJXAiwQWo2qMkmwVmYYxCy6alk3A2n1Rj1gWvM1uPM7R94C+5qsNyUPw1BNYhGDEEHT4EKXDNTfZcc5mpXL8DNnUYYgm8kCvVMt8HZ8DOSWZSwcVBNiDO7MxDAZkxLmTEE8cwYlyvcsNhIE6O+YCurz3LQuEm1qXftCOu5HUacAw6j3+pdr0H1zAMhcqesQbNkt+4S9FVByl56OPxLGV6PEHbM8cHmdtIMYxSwIyDORh04g51omCcgcZmDtyyZwgrrmJA8mXS1RBmXFu2pQ412Ii5OoNtz1NJmNWHimACIGDUsmblIVmWWiZF2wMplcq+ChJ6N+AYuXGJmIgNsyYGcyr1xHMxpmDQN/N0xQvqNCAlmmEBwhTBHqGPNTNR0RLJlL8Ipmj0FDq5xOeYyVfGG7i0pQhyEB6jCpphn20SlijSAPcNbix1EnhqEBjeWCxwjBBVamMw1bz2wQn+1HmUAarGFgZoeTzGBNLzKtQax3E7PJPxQjqwDVvCZdrmK4WeowIy09T+ss1zgJEr6QMxJuWWh4JQTdPTLS8/UuFe4Yhcx3cDFAyHwBs5lbwcJW3K3EwpNR4yrPMwIdEqO5pSyh1EvZyS5f4o3KkxzK1mXbCbI+JRVzNlGIbM4HwuG5hLqVs2eoK1VK8s0XUVTyKWXnIT8SuSqaa/5Lls8ySVLuqHnUTJbK48RzZE9p3McqJkpQTkPG4ljVY/VlrRmB6qMHRgOAajWZcM7qHZAVP0i+p6gkZWLP+Ro51AUJRDPMwbYYw5g3Fh6Ki7Eh26qYww4GWAvZNhjzLlROElFLjtOUniG2UIY+OqRNkcGEZxAOYDNviCiuo7EMsMuNTIRuCsS2eP4qiMkWIjhgUYhlFcPw0kpHklCW6im/xMJbCOtA/qNvPtnu9cvasn7/AOTGi4e4wP0koTU5fgcfRhHwEaVcqrYUPUAVUIhFHMVIva9yE4aOsT9Akfp/cvZbCVOig5POJXAosRmbNdmpnZp3UfQ5UuYlR4mYjA41PMXCnJuUEAUN4mRaszcOfjAII5cXU7W3ZDCMsFc1L4YjoNR2DTErbTQ7g7EkSPFKGaWfWXW9R4MoM1t5lvRBaakvBVJUJIbQbSXamGhNz4WdyjDLItcTSUMzbilK+Jl3UFh+SaZb5ZmxofmNKKL26jKxLUe042DAzIS3A8pKmGDDnXiPH9HiXUsECvMfp9oVa3xMsRshK4Yig6YQzJ3UZ9EBrzHWw2nP0hdf41VwBfLmo9TNeMwGSP0g2bTgpon1B0SF7eo0SqzEcXFe4MbBKwW5SiVU2in0gXmSnGB4YOGoJoTggQJZZhLNI+Int8YNGD6YMhNyfKwPiOURO8mQzMWpcdfxZgsy7uAVJgXHESposCtzDEmWY0fdh2P13EBjoNq8ExhnQxzjX4SezFjbN8mvMXvBm0P7ZjAGCGJSXv426cQY1BeTUJDy4iBBrJEBAtMpOvCSuo74/cHbH1iAETUjDDQ3+iKCt4jG4PFx9nusuIpqsQDAhi1qaJxskuD8X76TOQyhNu4PD4MZkTZsiZaJhBi4IZ1+tT1aFyL8CpfihW+NtXcOcXiGpmdqVibGlx2QyYipXCKYqMGtQBcbmJaOYRNXAwSiOpf4uXVTzg5hnZyV/ce1SC9Xv8Qiu7Oe4TQGVNzKC6xH9y07ql2O5SDdBj/GJgUWI6x+peyUbXA1EYP3NhwNzrasjcM2ReLA+uRZkOIgWukOGrXuY0dJY7CnmWukTmDyOYoC9eJ9l8wBQ5cwYlw1LmoxbAWImMSl5mswRe2ok4ipmYy5QqsRgLXuZk35I8h6lyZizWbzFlCccEaiTUzbqWMDCjBeSYJe3wWfFgq5hmD+JeC4gszERZKCsEwjMo7h4wY4hcta/aXq2lPicV6nuWnBEYU3U6eYZXWt2schUunPhBGtManiDxNat+otQimt4RE9JCqypzwFvcOo01La79E1DRag/fEc3BVyV3DhBzTOEhy4ipm/rBGR42mKMsWfiSPkttLiyVG9EduSCBk4l7UFozxLm7YZkm0alMnkmDIcyqXeu49fXCEPDDMSrm1hv4RxuIhGTEz8Lr4MEujzMj4KyahsSjUBUBF0SlHBdGAXNwjm1XzklO13tQCs4DvjfqXn2ziFyBq2Yl5DdtzbovwZjpxz2puNgdsbmUoY6JS2PmTnDxULCJwuhtJrCJaKxB5gJD+YmvjAoupRYIxEuh/xNxMWGcpf7imr7Y/MBYHdafMOn/UIGUrc3xCS5qIHbMiKJpBQohOOfXxpOo7jUtqbLbBp3TBF3IBMFx7CNlUvIKjVkohFx7SRyFWbCFSU9RWkZw/rLypZoilmDmWzeajmAQCmCMkFiMNPg+Uuufj0GJT4TlixUcH5F6hhb9JkeJcNge2XMqstwfQziPKt3+f4l8lXRR+UpECrZf0wkWWQq7Dvx+5ZlIMF6lCIu70QhYDBzcEvojj/AKljQ1DCUUO3lAJbNph2N0txYXBX5CX9QXf85X6jqBrYUf7xA/hkb+TC4l4T/EwObOuIxcOk0x+sumX5Garw+ZSdpd
MQ3JU1JTZp4jBMOZUtY7MRxWnXwFgEqYWzdYgKrmVdIZ4hygmwgOaSzSxhpKgLRzS7lZ2dErFnwEqKlMrHjblvxxGYUscwxLioiu0yQYqoOZSrKQu4sJOJFjSnP3OCBRRTsMBEUiynNeAN/iWbE2CflTAQ8kED7UQHeQKJSgGGj1u/sRq5wv8A8t/txEX+oV6+0pi7QRUQMdLiN/WeGOaUW8jqOk4BeiD+oixr1KTm6Cv4lAK2xTcLf2g/3Eyh8wL7rMbwN/C1++/rcK1da0n4faU8jsJUAKH7zILj4L3xEVei25oqWsViNcMnE6B2IFOCH1+JQabljlUuXmY5jumGjuLdE8DHgO4LWciPDU6Bm4SnJc2tcxPuDNMde8a4qp3IErTeQXwFoMFRFzAlr4RKqYEu4YmkNzU7YRJYiJE3ljkExUR4vUqq2dxLRvtmReWugebf6g4/rwfvBzS1yrZV7bi6JcShvno/3pQ3nZF5/sZ+0uppGD+lKLYCxTT/AORoaUNu5f8AYYYJfMmBeZ1XoEJh8mwvn+/tAXtnL+h3X7mqo2FX7r9n1gGww8qGHjm5h014lYEAf64kXnvv3HZ/zDhSYzSP8MQDeKkaMcJMiBfzHMglfhA2yRld1Lk/pDPqYNbC2BMFx9cdZTo/eUGGyXvMFMDDLMIwLKltyjIQVh6l7Qh9liDDoXKy47hShlliY4mozBA5hVFTEVUoYr5MoRKlcybgtEcG5bLFVMZ4O5bniP77fEShumDqXm2uV11/aM7A+j6TMqZ72x3YA+0TV1b+UEZeVglAb4B7dv6hQWXYzDFE2diZBSwVgTCQGhVl9QqB9AwCZVQH8zAnteP9/tzMihwfuDBXHZxHFjOl/wADOfJh/ojUCvA/RAda/K/qWL1ddxmlZ3kjqvtmSHF6GpfDJ1CKT7bjEWEwJedTLqrzKQ1nMWA1uBzBRMHidRSMrQNkuZu4J1+Ei3UQ0y3JcDfCoGiKEyZTLkutVNnibRKC3cqiIVUbgjV8NhVcog4ikZVx8EawbRi5EpeGUTemkoJaNpUSXDMTDGqxDb0ZYm2LwJ13KhC+3V/zLvKcpz/yFgW53ULAW1rv+B4/+zc2n69f31LdtWlgpeiP91HXRt6H/wAZRHBLiWdxV3RgdRIyrXmJ1y34j3sKTPmBzQ6fuKw2VDy5jG7eK8+YiwhsX9vP5ER6auwVp7E1y14l3kG9ncdkGht1EKlI3UxcBzA1+725JWshntEGEO+YjWlcrU4gPJPEuwZtY4uG3WbtgnBcSyP0j+4kuPMDMnvtIiLNcQuMEAQmIJS4koyQgN+Ic8uABgMD+NHyvWM0SpZ6iXqsckAnkgMEAcRWbhwt11HtMiWZ0VU5ZK1sN3MzMTO1OF+ILQzfOJmEctoXynPaBFrEsOOVGnXj0dS2R0wxRcU5zubi5KvlWD/fxAylZi3dmsfh/X3lS6H2Q00vP++kYU4D1QSg+pemCl86jNsalLPJPD+5c4Pj9wUVpcwLkOJQvJ7GIQVO2KAArL+4937fttbnscI3uJcno2zvVdwu6OIgLHhk8znHsQu4DQ3PFYVhHAeyM5jYKwEW1VRpgmjYqKw3KFQMU+w7hWDJwRhsmNWEjuXwEVS/RgFcTGM6+SLwiU2aOSEFMYik4ySoaO4Zyt1Mh3MOKBKIlWoOI3Zf0gqXqGWopFyIrCFphIgNpqUuo3tXxDDqHeKo28RIG/nEpQt74m0L7mQ/eu4WdYbtuf8AcQ6tMR5dxAYY6ntm2GwfUvyDFpPcLg2SUqHh94p2pcn4lCu+JQ3BAPabX3+ZaC2nJKc0htcjQ7hhUrzK49YBD7R0LB1MRm+6OWPtNmEcJA5jN35lSUbzEUMWopFKOOyKIqzkmJM+WDUqSjLbZAKwIgynDMZdPUXlK2ZxdwwngZmOnITEGpZCG5zBO6WXTKivwGEYeEOuMBEqleIBdiUNrDO8ib14hCL1xKyEm9StEv0Zg1ywvW8R6CpNFzkuXKoG1fxC7NRyOXULT8yruKEZFMa7gIxhvHL/AB95VHK0xA6EcERLVr7j+Yoi6NzJvywoVqpX1MyusUmKg4n1wjqO9C2epWa8YlQNdEpwGMQMI8e4mG6c9kJjJ5JhhgKvBqaidGFtd8RP4WeTNwpwmJcK2WDaxjhJ5IN9oPiCpVzBrZxGYLH/AFS7G7ydSuHJ3Kg5RrkmNhBmxPcLyImSbD4YnCJUOEq9ypZ9xwa1CsuHGomtGCyZmGqlKKMWICIUwBp3NwLZjzB04meG5QYHmFzYEpVmEKimx+AXgamXlUYnuMXYu5drVS1NJCNIaeEovO0MAA6qJ9JelMtyqla/38TERuHzMzrxGuuoocPEsThdRxqb19YTBjhmwBmmGgqpWt1Evs1Pf+/iZwxlHLeeoA+hmlI1rNTcrfshtu/gihTL2KlW7xOvM9vtGKAwB9UXYpPNzAB9IYNh+pzMm0Wqj7CG4jR0gkY4dMbC6ECx0jiUvb5gOp3RZU1lFhiVUqHZKzMfuGjuSvPyLZAigtvsTwVwaU3MXEumoo1LbhrBxi0yzvXEtrqDBEZycQsFyVSxHVG2IUbOYHeVhqjDLJ5xCNV1ArEtBWVAmNFZT5AF/uKhUzJZQdwpUsNQssBRDPvcYDRpD688B9JkNoRv6wQArUasriNKo2x2yoijoYY6NGbnlvmaRC2YhFsZlTGK0szToj1KUYioplLEH2jHsnKB5I4tIw4X1BmOHQ76mFs1kgpappGpos1OWaVHr2hYrzGew5ErcVADMEYmeKiPLMQuwtn9xDgczGmuZQ6sRLOpZuLfhUZAiwpLkMzHLMpbuX7JdyrbwjTO0QzmQVg4wKczrApoMzGybnXUWmj4Rrh0jrVhHcCVZK3ft5j6mz95UuV/j4UTWJRKl/TniLgG1vq4h9lRaqzNnncHtL+OIlGeJssde5l7V8fzG1UNZ2TgGY4R7Zl3gJ3QMqzuXlEFKPYwX6gDUXmdHiK/OklBXXc486xqKwpDVGOGfhCaFxbn09zONvEO01h9TU4TcSU1zLk7lLGtruZpmokxvukBmIgpMI0iilaZqNmVJxWSC4ISVVnkUriQHAcniZtZZkJAcj3KTcpWVstUTwzLqFOe2I7HPECrzK9imLZUFYgPDAbXe4Mw+jApsJgTVGYHA4zT8eQCdtV/VKh2EW0Y7jEv4Cxej+ZRg6mmq6ah1MUBo5hSOSrEaUNLIe7V9E8zWAHETwL5gAKRBhyViGmtVyzVT3A1HuGALId3O9mGatuyUNpuVkoJ7mpNFx7e8wsERwuCS1e7mdc1tiOHJhhReOJnF1BR1fD5lLBY+PYG6CHeYm2gxBVpvCsHzQ20LVyl7czIviZuiJKXKuSy0qZ2IW5qZKdxpz8k0dh3A0YSWlUzDIWY7RcFK5gDpxeJtU+YAMMC8FQGoHqhPUuVgyPUbEAeSGCIkQFJZGYlb6vUBXLmyLeDDsalvf8AvzOhjbPuMuLmT+47TNFUf9+YKFScStexUWoS7LMzuCzsQrAubneuAs34zFByF1B5FlDSV5nEF6WMW
5DgYIgp4HEO2ON7iMuUTaKgrrrtMX0MkqPS4Z5II+cczY4lhQa9y4oWe66YrVxcsA1bmIBwKJNjco4go84ZWjoirkhmcxK3K06lAqN1ep2I54ml3BurGXraEP0Z9dR5ODpFermW85jVKsYVyotJkg4A1/M5OUA1eo8rUt046hqESpn8zXGX6fFE9ZeEuZgbhwitiiMVWMBaxRNhK9+UYAkdly6S8E1jrEXUGF7I9FniZJJS2n+dTK1/vEr1zYxnHECIV5wwMA+B/vDDr+vcWy9EZzRjx75jtFG7OSYe42QVSnuJ4FQAu1kTBv10x6WS7uDlK/NniLuhqa/6lMKnzErr9mI42tc3BAwajAirOGBXG/WMkyyp1BHWU4qotdcVJohNrqXBmttkmK0fJBMN3LYJZZAsTLx+DXSHtYruWO+UoqBfUEgw2FwzNsKxOJkTBH/mc/UCbzB5RgK4CuNE8q+CaLvXmXps1U/uIaitXImT6OoXe9wGH8wvOc+Tr4EBUVn1GRdBXVXF5DMu49/icHmoqHqXUTC7jdBg0iA8zuIXK2ek7DrzxBUtdS3ZEKlfMLXv7KQWoXMHKY6ABeWrlrCYR4TEsF6eYpKLoMcTK/FCrqxjGWy88SlNDwWnqF4LaQjZGfDMDiYfycR3iy2YSuIbHVQNtg2uESSEpF45w1KJZgz3LKRLkw1HkB3Ct9GIM7lmSxTKDuckt3CuXZO2LDdPWeQdy4RfUw1Z+UcVZ7h4stipStY5l2ODqB5FeEb5AqNZaqPAoeYhYcMFQPNZ9ltlPLzOqHM/cuRK9cmcTDGXkZIADkgiCyBh7WhFfT4uGOu6/wB9Kj+8C06bjrbOQruZBzuBzJGtle89RBnE9lpwXM37xxLPPi9TxPsYBdjMjFZ/Mo/eY1b0kEB3LAG04mCdTMG/CUbNemrhyRiAj3L4g3W4r0DmXtCxas8viIwGKPB5gPMsxMQ2Y754/MuJzHkhtIguuLI7u6omYviG7mwjsfWaXlnAKjRq5hTSVrURoOFsBUSWOXPcr5NL3OZXszGER3DkNrNgioJrH2lywuBk+kQGEuIU56lqhiaF+cz6DqUQX1EmWFDe9QfDN1ESHQxj8X6JYO4aMPWLekfqQBDKP3EGc19Z3Q4e3L+pcLvc3zT9Zjg+8EhxqWLdti4RDxK+efKWtouTmErDniAvjwJArvpf6g453uoOm/MTjio0J95ZoMilLCbb0XNFSFQlrMlQC8jhhBAVJcrdRsc/eZx3qmX5poG6gdw7IVkmNJhKKrOicrAEee1sOyqDKq+4hLup6nShhyCDCiKc9oRlb4jOGiA7ZkxNaSEQn1i3c5CGTFvJa+8b9BVOBy0jqDBoG8LlI2TpKl4hZSbywqXVxdKbTMolB5JocowtXJNr6J2cTJKDQljai4VBcpklZcj1D7vEFaW2FFz8eGmWUnsxHY7l4aSzr6PMYa3J+03yzNbFMWFQfY+iaHF5xByLvFxoMHKNxT6vcDlNf79QMDtcG4GDXnMtiqllHv7Qq1cMibmfIPiCD1MAmb9xjiNYTZW+yLLo4dQbZU0+0Le7H8y8rGtcwb16D37gdiUEBM0+YAZFzmoto8XcW9nC5mSKtFwk/NDrwG4bL8IdgjLGF49IO3xAKSGNRKE2AIY3RMZlovTLZGQ6NFk2kdnE7SppmJSF7IFmqEtduTOAngn0oE/iNWscwRag35lA7rywkvDm+5YZnmplUmMQ15cSgpABOp4olpqxcrCgoEuflKr0wt+pa9qlT2iO4cSmlI9yPlh5vaJWJ9OJi8l1TDQYTatMSwbOXmN127mAfcWbDPXUoC1VFlpLU3u5dNyg18PXgXFBF3VGo5XMCOPEv2rK6vpEu9Z3uKPIWW4H54rkPBI7htWu4z/sxHeoPfTOKmskDWJbcGKSCUUL4JlATD0mCPRCqFnJE5SjuAFQ1mDbqW05zMDcPJGbVymLMs2ywO0tyDcTBFpwwS9ZxrWIRvs6hFMDvmDd/auIevTncDhxttcN/RVmIl6Oe/hwR8z7wzn6/JiF9H3hfUYjvOT8TCTmZ15KhEK9S8eJQetSkG+LZeoMccfdM+xQVXriGLwsYq3PDUqmVanC33zMHOlNuKiXTPiO8Dy/xH7AgiHqOwlKbeuYoCZsxF4axBE+JhceY8+N09Sg1+4WriMil6KhBpXGZYiWko4MvDJ40+proPHc5wlr+WC445qLZK5mPulmsq7FDF3gRxkumT6R3Yc5TUOFPPmVNVAHXdsoUd3xxA4Ca47CYGPAmV7zkTNmG7i/+X+yLHK6NZh+EUZdMswWGUGEzagmE8pSupyAc4OJkFud5Jg885nVMSzQj4j6IzuW20MimZXMqVZgmGDuG1ygjJq5QaCqMP5cwKB9zlvhwTy1wyh6EPmqcdSmWUMtoeGEjt2u+kt3P+xqcOJlAb8xRH9SUpfuic2ugwx84riYfi1axiZCC9sZ6/7/AOSjFqdHUul0ZLZq9mLJ4qXAxHBio79TFeJBdad1KDfyQ7pUuqYJrxc+qacQnGbm4j94gFK+0Qbgg3Vj+IsHRMEW9zWQGLi7g6UXRaFdwcEpCgNB7yhTJZlqUMMu0z/cauW8bJUZWgYH0gxbPjb7Sl6LGAn8RVVqZoEt6p+SXqA8MsCAqblmqJ6moE4Ov+zFClDVZ1ABZUA33RV21MG+FtpSUFXlqG28rYrBvOecoWYA57id66H8wWT71xtZGWmCICVrmL5gdLgHnsxaF7K1AfFVuHj4r7zTb5uXWE0BrzMrq/biCmujm4mnT7xicTbs3Uz/ALgCXaQhgJ9oBQZ4Z0mE5OIO7Sg8sU+eM7lwmHnEOUMal3N0yS8qcle4O+88xwRyRT98TuTAvKJrM4mKytin/sppUylgzFe+Dz+SJwJsvMWET2mAu9drmMFGDZ9Z4yj8YiRGyjNzeLlOKlhbB5mrxqF0xtNwtJtOAmanpHhg4yRHBaRdf8IXgExdGY0CvuXCpVNOFTKtH1Yg8C4DcyA2Mzcm3TKUpbahxHYtOdRDKUu6hBcy77got00a6/EwFKazGaLTJmY1XsqmVIHqbC9RX0t+uYAWssY469R2vvWohwDzUBmC3MufB2uIxm12+szHL3Ay3CmzEdscIDRNwZx94vIr8y4XtOK8aYyi8zwlDlXPUd8S5jNykozldTbgPW4C24ZzBaPif9iOdFDdfRKKp54mTj3Bgobvcxq2/iOS1F5uMKs+IbxJc/AgEUXD5e42+GY7lDqGVf4i5WrdNXDMVRVD6RCs1+Ep8Th/EQ2cYvUXDsM2mmWBRme5nU10S7k4YQo0zC1FrivVyj7C5vVjuAOutJiQoveCEqi5+syebMF/hjPHZLqI6A7tm/8AEVCaVUIhMdFKKTpLDFxMBr75i5JnArxOnRjGogQMy7emYW7N55jjc+XcA/YhdA07pJUOJ1NMx/owu3d3yjaR9YuI5P4gcV93h6S1Zd7lluDdTNuM4dQnZ4rMSnk6GMlJq64lKFab2xWji79/Ey8XM48QyVnOpfNUGBijVQZBSs4fxDtHOYqlDMGB
+      [... base64-encoded image payload elided ...]"}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate, br, zstd
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '84018'
+      Content-Type:
+      - application/json
+      X-Amzn-Trace-Id:
+      - fe553dd5-7c66-49d6-9b31-0c63b97f6a60
+      user-agent:
+      - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1
+    method: POST
+    uri: https://api-inference.huggingface.co/models/dandelin/vilt-b32-finetuned-vqa
+  response:
+    body:
+      string: '[{"score":0.7786104679107666,"answer":"laying down"},{"score":0.6957443356513977,"answer":"sitting"},{"score":0.6489157676696777,"answer":"resting"},{"score":0.5639538168907166,"answer":"laying"},{"score":0.29528698325157166,"answer":"lying
+        down"}]'
+    headers:
+      Connection:
+      - keep-alive
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 29 Oct 2024 18:11:05 GMT
+      Transfer-Encoding:
+      - chunked
+      access-control-allow-credentials:
+      - 'true'
+      access-control-expose-headers:
+      - x-compute-type, x-compute-time
+      server:
+      - uvicorn
+      vary:
+      - Origin, Access-Control-Request-Method, Access-Control-Request-Headers
+      x-compute-characters:
+      - '24'
+      x-compute-time:
+      - '0.133'
+      x-compute-type:
+      - cpu
+      x-request-id:
+      - IfnOgP4gFRnO1EggoanIA
+      x-sha:
+      - d0a1f6ab88522427a7ae76ceb6e1e1e7b68a1d08
+    status:
+      code: 200
+      message: OK
+version: 1
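For orientation between these two cassettes: the synchronous cassette above reduces to a single InferenceClient call against dandelin/vilt-b32-finetuned-vqa. The sketch below is illustrative only, not the repository's actual test: the image file and question text are hypothetical placeholders (the recorded request payload is elided above), and any weave instrumentation is assumed to be applied by the surrounding test fixtures; only the model ID and the shape of the scored answers come from the cassette.

    # Minimal sketch of the kind of test the cassette above replays.
    # "cats.jpg" and the question string are hypothetical placeholders.
    from huggingface_hub import InferenceClient

    def test_huggingface_visual_question_answering() -> None:
        client = InferenceClient()
        answers = client.visual_question_answering(
            image="cats.jpg",                     # hypothetical local image
            question="What are the cats doing?",  # hypothetical question
            model="dandelin/vilt-b32-finetuned-vqa",
        )
        # The recorded response is a confidence-ranked list of answers;
        # "laying down" ranks first with score ~0.779.
        assert answers[0].answer == "laying down"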
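The async cassette that follows records the same flow through AsyncInferenceClient. Note the plain python-requests GET for tiger.jpg at the top of that cassette: when huggingface_hub is given an image URL, it downloads the bytes client-side and then POSTs them to the inference endpoint. A rough sketch, assuming the async test mirrors the synchronous one (the model ID and question below are assumptions, since the async POST itself falls outside this excerpt):

    # Rough async counterpart; model ID and question are assumptions.
    import asyncio
    from huggingface_hub import AsyncInferenceClient

    async def main() -> None:
        client = AsyncInferenceClient()
        answers = await client.visual_question_answering(
            # A URL is downloaded before the POST, which is the GET
            # recorded at the top of the cassette below.
            image="https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
            question="Which animal is this?",
            model="dandelin/vilt-b32-finetuned-vqa",
        )
        print(answers[0].answer, answers[0].score)

    asyncio.run(main())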
diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_visual_question_answering_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_visual_question_answering_async.yaml
new file mode 100644
index 00000000000..bc1e1b86767
--- /dev/null
+++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_visual_question_answering_async.yaml
@@ -0,0 +1,1940 @@
+interactions:
+- request:
+    body: null
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate, br, zstd
+      Connection:
+      - keep-alive
+      User-Agent:
+      - python-requests/2.32.3
+      X-Amzn-Trace-Id:
+      - b0bb406e-348e-476c-85ab-36fe49abadce
+    method: GET
+    uri: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+  response:
+    body:
+      string: !!binary |
+        [... base64-encoded JPEG data for the tiger.jpg sample image elided ...]
94tMdmmMj8bi+BLFiLLxPgTNQnso5w2hcj4whjmk6HIsSOjW/sb6rH2N72MXW+H7+Pdl0xq+URf7 + JL9CY0ahyLLRwWNfvCRQtjIyJq0cFEkyPGK5GsIY42UWUaSsViihxNJpZRpNI4H4zSLj40v6FhOh + qxWhpPouh8mk0Mp5TwtzH2N2SdEMSdCkXZGxiePvFfCl2RfxGSL2WNVhcIs1YcbGqwsLd5P6KdE0 + 2yEeMT5FFJYSy8JFep+hiF8NsexYsoazZqGrGRH3i9jFittbV38N8IRQvhPL39jWUhEkI+sPCZeH + E5Oc3tbw7XQpWPyR+C3YhfDfI/TZ2VhFDwih57FhixRW+jSfj9jLzJ/RQsL4b2S2d4TotMawiSEV + hoeOhZpl7LWHYnxsr4FFfF7JLZLKy8JlYZQstYfe6tlL41Yv4tElhE99CXooo/7YWyxO9lbb9zZZ + eK+KxlM4Q3exbF6q5JFiwjsrdea9reyPxmWXeF6KF6X2PkS9DL+MhfCsschyLL2rK9jw7KEPCWxi + WF7n1usTv3tlmob9KKK91DlQ2LHRZeWIr3y63qTQnfstDkNjfqQhe+bKsrEXuebP/pa9jVj3xbT9 + LlQ2y3i/ZH02WWy9rXJwhtGr1y6Pwr79sluuirF6JcFt++PfobxZZe6WFGxQK9FFe5rfBi2WXiUq + G7+PL1tGlf6GLEPa3Xwliyy8WP0P0OVEZqXRfyUsJEet0svcijTW2sIv4zivsi18Z7I9FFEd0sPj + FbEhRzWyiivg1vUVQ7rgU2Rd/DfY8wwkVtWHxvgviv21XQ1wfjZ45f5fDkhlZojuWHihorCV+9sv + attIr1IUFyRdr4UhrgSGhC9LRpylQ0Lj4t7F7KrE24/5oTUla+LWX6WhIkJfLsv2Loj/AMUq+sT8 + n0iE1p5fwK/3DF0eWDl0Qm4Mar/xXWPJGpD5/wDFslDUhwcTSf/EAD0QAAIBAgQEBAQEBQMDBQEA + AAABEQIhEBIxQQMiUWEgMnGBE0KRoTBAUrEjYsHR4QRQchQzgkNTkqLx8P/aAAgBAQAGPwL/AGKS + fy0Y38M+KzLqcJ6f7NH42Xwz+FfwydyR0YLC8f7kvHPgt+BcaJIaJoxtdktz/vs/iSNmVX6jW5df + lMtKuc1VjX8hp+UuWX48EE/lXUy5NLg0v0JWmNkc3EpRavC9L8NjNV9S9i6O35SPAof4liZf5TN1 + IE6bdDemr7GV6dSaHYtTPqZfhW71QjmfC4ZPxm/RERYf8RU1dHY8nD4npUWorpwvZYWXuy3vY7Gp + P5e3gsR4LvC5NNyKqfylEdMObRkTIqlrg1w6opWrMzqddRnqt/yP1VFTVPLTEibfuZuC8xk4ubh1 + oivmX6jMiG49C7j3k5azyw94K/QU/lLeKSfDG68Hl/KOqjR+ZEvQsrFy25HTVipixSqOFn4r0seW + pLsJviX6Mqbav0LV6idFVUGT/XcB5XpXlM1OnYy4OxO21i7FwaX5vN2RbQicOjNPxZZC8CwnGPDK + M2hYUmv5XJm5Xc5pOWyHVX5RcOm1OH8StShf9NSq+5mqqmqrREVRa8HK/wDxF8OHPUXE4tLXVE8O + pOkXGSlLVCro8rw5ubszLQOPqR4NcLrwv8LMsJf4ULctqOfy0xr9iN+x2FwaNEX+W7FdLicaq0bI + XDjNXVTdsUu1Cux1b1KfToU07vUyt3V0x1v5fMiabpqUU8Th186fMtmKniU/w67T0GnfhvXsWZ3K + p1ZBmWjIwSxiJLeGWvwblvxLP8vrh8OEiV5mUOiFW3Lkpz8RTTpBRVxfNQ9VuPh0uMx7z/YpT2Jk + b2q1Fw6noV/Bp5qm4EnxVxF0nQ4b4t6eJaoVraGVzDM6TuQaErC5bGUX1IeEUs5lKJSZLfjt448W + n5eBvY9ymhaJXG4zU0atu5m4VNWurZHGo5f101ZkZ6HKRGEFh8WpxSfwOHVVOlTshcPjUumpmVUx + lucHuUVT7ETKM+5EGWD/ALSZPwqS9FJoX4ZEM8/1M2ZMlU2fjgnF31xXgnwx+ZdUO4/5XIlSr1Mf + B/1Kiiv5h5aPicGq6yjp4XBqh6zSP/T8W9OXlZYuPCj/AE9GtU5imuuiK0uh/wBZxuThcPSTOqP4 + bdrbFL/SzS6uiDSosn6snCFhNJJy46izUkjnwLGMc2wmRhPinwW/JTOKjc/1FX6EfEpjSRuyaIor + 5ekShrjUU3/SRtPgk9VBQuHwaamktSeMpoWlFCsTm5qlZHOr5Dh72HNGQ1M2afBHgkeF7kUlvBJK + wRO2GVEMthHgj8zLI2L+x/qKM3m+58LVCfyvoPnv4/QVhUKq/fUU8Wp107NHFjoZuHeqjYeaFJ2M + 2Fi5mqLNGgvBBfw3UnK8crxt+f8A0o0J/U4XcyLanqKp6aMzqpQU08Kha+foLh0UKVrV+kdSqlae + OBJKelS1RVXxqf4itPUp4FL71ehxbKM2xl+Wq9OENFsUsNReBvfC5fwxEl7E0mhfCMJJ8bkmfydr + nVmf5nyrB0VvVmWjmpqK+LxamqKfl6jr49WSit2p7Cpp0XikTPiJcyXNT17ioVDp3tuVcTiz8Xid + dh1P5ilvYgkcq5qPHv4Y/C7F7lnlf5ifwO2FKWzIbvSZqfOj4PEf8Sky8KmluvqU/FdfErj2WHfw + xVodiWnalxV/Qz06RuZlK4aFQti912JXNl1gmlyTvhL3J/CyseZFhYTjYitXNSVEF/z1l7scuY/c + XD6OX6kmfh8tfUy8aOSxK3siuqp3ejIq1S1HwuI99WSorXY1M0wurKY8lKc9yzhKUV8GrzQKmuae + GvlykK0ERPdVIcNVImkl0ulkrXvuW5X0Iw1xSqYo08M438EEVHmIpZvGE0kP81CxphPInJL4lFM+ + r/YyU1V19qaBr4dX/lVD+h8Sni3esDq4izd3crfAhKLNMUrNSZ01PU05X98MzccJddyODRmq6nxK + 6oi+Upy01LiVdHdGZ1fVDdPF4bXSSXlvvMfuS6Kl3gnKv7kpQQxzQvUnCVhchWMubQnCcZx1x0L0 + lsMtSM3DwnR/nP6seZv+yMy4FL2Tr/sZczVH8vLThTRQm6toRHE4b4lT+XzNewqv9LxlTDum9zLl + oqnqc3woMlaVRVQ+I8lP3MlEUKlHJVwakX4qoW1xrgvPxN6uhD4tdNtVVZnPlqf/AAy/sf8AqU9b + k0x608r+hpP74Za1yvfoNarGxzUXIWMPQzUEPw3wnCUyCSVhKZf83CMhJq6q30FZJLtJ/fDkTv8A + Lw7fVmVummnemnT33Z8OvhZUvmVoR5+M+kvQnRVdTLwuZ9R1Rmzan80TlqGqppj5qSaVxan0b0M7 + oVNZ/THvi6Ws1LLJx4ZLPGxOEE4X8cYPDLUa/mdbs5rxc7mZtL+b+xZ/Uu8PUgVdKhfKv6ltb3Zl + 
dVdTX0KXGXN9h8sX6GV6/Nmeg5rm25PzU6FrPen+2EVf4ZKvT90ZtVv1R1XUjUaiUTS89PQ2VR1R + Kc9jTDyGhZHlNGaFnfDTw6GuF/CnJbC6Mywv+FchFiH4b6EzroSouZKb/qrPNd6sWbWry09e7Ofz + Mb20RVHsU00fNqU0jqqRprY00NMO/UZmQqvqNVQ6N+3+B07q63//AL1OR5alt1Rm0JpM9NqjSE/s + Z+GlPY+HWrman7GXicxmpra7I81X1NSKS6JdKaP4VeWrCEXIZbxS0XLltcIZmpJ28Ek3/F1LnLhl + +pmO5luZqv8A9Hxateo6nv8AZHSxTA30eFK8ElyrsM1sTNyU7U//AF/wT0qOXXphKUoaq3Mr0JVq + upl4qldTNSznctY3qxmn7GSrUlfgZvB0Z1M9IrD6YaYZpseXCxfwWI/CiS5NKuU8GfLq+plmy+5b + QjB4PpSj0LiPU+2NzLFzPUWLxBe/fCaXBkrVyaSKlcs7Y293jbDMtDMnc+JR7rC2MjTwvhKMxZ4Q + y2F98JROFsLkY2OZE0MvqvDBceNXG62HXu7IjuQMuOB4sXrg17DqfUynxKiESz+YWN/ZmTia/uTS + Q/Bof4NMYJPicIuKlal6JT3WM/hRVpjCZGOng0xzGsGuEkmUzrYTWGUleWLGZY20xQz3IGZ68M1U + 5SFhdWRHQ1uS9SS+hFXl2fQjiXp2qM1DL+C9Mpk0wZSYLVGtoNdbPuZalarcTfsyG+UtsR4LeHuS + dV0KrSi25Jbwu+PLqZalDwsoIZqa45TI/K9MWmh0a3sWdvwVU9is7EJYRFkQRhKJW+GSoiu/Df2J + V+GyUdsJGZXoKpaGhnV0WKqPfDJXuRts8JZK8VzlE8NSxJoxKYg1kt4Lo1LGhpclI0LmZE4xuZK9 + VbH4tPyjVWpHj7EdDsjTDthndkWOgxqcJWqIMtV+Gz+R4SRvhcyVYO0mXQp4qwuKSKvbCTmRZmpc + vYiSEQyN8IRdnmL4WcGuFy2ElkXZ2IMrQstRGZFnOK43DXqJp4NDoen7if18V9MKaaTr1eFyF5uh + 8Xir/ihR4Koxz0l9z4NemxlflwzUkPzIlYLBRKGhFQvUgy1WaNcJku5LPDQknHlZqXPQsakELC5b + CCDLAmSzTDlmirqjLxeZdUTS8HxOGpW9JNLwp4q21MvTXxtblXFrJ3qvhlovULi8dyuhCxaGVPwZ + ZL/Uy1edEOzRBm+56kieGuFU9R4UoXGo13xuct1+FYbnUszseng1g0sdC7sTJOGuOalTYUe536YZ + 6PLuiw0+hXQ9J8Tgt/8AgqaNPmw+HTqfF479i2F6kjzos5Ohl8GekkTWqPi0Wa1MtVmXRFSinYzU + 6oyvY7klmXKk4fqIXYaY0jQgszmZrDL7FsZLrFT74Wvhd2OV2NDQ0w9CC3g1ZlqGvmWy3Ogng6Nt + u2GdbGV6wT4ctOu5ZC4NHmfQXE4t68bsn4dP0L8Kn6HLRgr3O+MdR0MjZ4Zqb0mWp3IZ8Ju3dkbP + C5lm3rgxR1MxYzR5cJxvg4fhg0Irs+5ovUSzDWYu7EyebwX1L04Th2xnSoitcy+aD4NWFiS5br4J + OUq4n6h1D4tfmqLmnjnBk4KtbGanUyPVEMz8NHw69TT3Im60ZFWq1Ms6kvDla9GXUGTSC9zKZloz + K8USWwuSnPgmBKmrIyaqFVG6FsSWwsXghrFY2IvhKIYm/Z41UbTbCmvoIl9cHTsXIKc3kW3UsoL/ + AGw/sRmInwwSaHpg6Wmj4tFROGfh2fQhmenRmdK+4nGKozX6PBuS+GSumzHTUamp2I8ELwRZdDzN + wPoa/UcaltSE0zuSbSaX8MCxzUlXZFD6rBPqsLj6ISe2D/mYrHMyw/7nphDZ6GbwqoWFVLWEo6GV + 1SsZTt0OpHQjHPmzUoyJ3Y6noRbCGT1wtqWw1vjOHKpfY8pVz3IqrsXi3Q1cHKedi3gf8SS9Lvui + JLSRD8PfwVehr5ahd8YGX3Jr16FkvoamsEyLoMsThbwtY/Fo03E9sXFP01MsyZqnCIy27nI4NIxy + oknYtohuJIg8x8Pi3XUlc1GFvBzIUV+2Gscr0OVQhP6GmGeEWZZEVZrdCVSRTZeB09MZwlamlseL + EZanNIlh3ZYpXe5nev7YamhoW0w0xla+OMcrIw7kZYZSnsXR5EaYdUVfuc3F07HLddhcPhmXCxdk + STEM5LnPWl2VyZqfuWo/+zNKl7nLXWn3Ip4lD9TOqJ/43Jpqpp/lq2NDQhKxBZU/QiLs0OV+9OE4 + w97eHU1NZL6D4buti3JUQakInc7+CI8FvDqSWwXg2xVPaX4IwifqKpbnPXUu1Jaqr6jdOrPK5LKm + PU5qXP8AMrEf4NH6yeZv2JHVTTm6nmuanpeRll6Qjo+hOXN7EOmks/6iiv6kzKP7CqenqSoaKm1K + khKH2JXDq+ho/HubYKjiOFXoz4lGq6F2XqcFmzr3M1VdUdJxuoJOxvbB4Sa+JUiwy502S/seX6ll + BZlTqUKrR4W8PuJrQy1E7HNcz0ttb9UeaaN7G3Q6+pOV/wDyOe3cmRumnUvb3MyZKObQmJgTY4oq + nqK+ivJav7F3m9zKj0JpUF2cup37E1JVdzdFPhmC9sKV+nQyU8R2JxjqU09B2NLompZW+8+Ds8Jw + hprHXCR1YRVoRTSkjU80epm26kUkGxYkuWwc7Gv2J+os+nUy03wqpXWwvm7Db2PTqeVPr2JVLfZs + 0gm/rBuXgdOp5bMso9ylNrmvY0/uObQrIjMR+24rO2Es1NZwdfyoin6k+DuSyUsJxmTM/bDuQjXD + QvsQZiNFhlaMtdLtqeWPQvTxFTtVJHxJXSpHI4jXchq3VF5wtS/VszKPoNVNayXk0NGtiy+pMfYh + 27YWYk/3FLhsh1RKPsN0lqWm10JJy+5mVSXqcrofuTVQ0Rlpf/Imq55fcr0l3PNoXudsZ8prhy3f + Q0uPL6n7nRkJo1T2L1aD7kCXgfd+Ox8Sqy6DrqtRsjl/7fXqdlthLwthTRutcL4KpUy9DT0MvxLN + aC/hLuf+2PcdVNNM7ol8M5VYvKNmdyXcjMluTOnUS16tGkeiOVZat43JSTgsnYUpH6h2V0NVPTuR + l/wRljM4vpJlSU7QL+H7FpjudU9ZOR6bCcaCtVcgzVZ49LExJT26nJrNzySaaGhbYnqPcnsX0Lai + m6J0k9Tb3Km0KsmMH2GZX5upl3WqGt9sIZ2w5VKF8Xy/pRbb7DlEkjgSgn6E1Fsbl5ZGSn6EU/w6 + 39GNOLdMERUxw37Gn1JLVVe5zfUtBmZZXJq9RseedRPv7YXm/cbp06PUbpt2FNMpap7i4aTtcjoZ + Xt0JVu409UdzpOgrbEbUiURFoM1K5atuhHQU0nLLREY3ZoVGmu5JtjOpU9GyqnoxMjD1JptVToZv + 
LUtUxOipZltOMkkbGZaGam1O3c7mZuZwdWyFBC2G1++KwhqH/QTWpEQ6tyId9y1KHHymsmpFSlHK + /Y6Gom4mfqKmP8nU6EF63PUcX9PB/wAS1GwpH9yVUiKp9zPvEEq63sZXvsR8y0KnvI+IusCq/nF2 + 646QSSzQthFN/UdS4eSpfczJX7nQuSsJ16luuE+w6tlqW0gv7M80iHi30QqI2MqMlCnuXMn1FTQL + 9T0FqibVHQhiuql0HZaiwU3nYiMpm+poXXuWR0FozRWITf1E5cDykPcWV69Ty5n+4lV7mljueU5a + W/YV7mopWq2N7dT3NIO41fuafcndMv5atSeI7ObD+G7J6FMqGN1eyLUtr0MzfsKCMLbCMrsOemmP + cjLfCN4HRsZidnqZKtSEx00eC5Hcm3Rjadi9d3qRT1JZn+aq7NJZnevQvsh7ZjU6dhWj+omjSIMq + dy1Eog29FUXS7dSczk0+gtzSX1JeDWb2g0uTEexr9hKl0/8AIjPMHY1Jj1g8vqa2Isjk3w7dcPXC + UpFtJG5v9TpsShPqsNFEHMz+pZb6ipnB3iLHWo1LH9z/ADhJrcy1bEV3pZmoc7GpGLexpr9xbCKs + 7EaSyI5RJWLib0FH3JqMs2PUtEzoQvoTzExfqSM9SKEvcu0uxGb6F0SjrJp7mjLUL1IVja3QiETO + Rk8TK8i3I4dFX1PNEjhWZcipwOjh3Wqgurbpl6dP0jcQS2rM8ysd8IV3g316FtC2MSJbmkDzKMLV + dyNurNWzKlzdWVUz5XGMLwJuVPYuiErkRBdwjln18M9+g/XwTBpfDU6WwjTuOmpz6EYTuQzymj/b + B0KEZvujt3FbXRyOhU+rL2sRer3LNc2pL1Hmq5KdkVU2J0kooTnqSko7mX6zoOuYkdfzDqeuHNdj + ppTnSTLVRqf4NTMSXRS1o9UNKz74f//EACcQAQACAgICAQQDAQEBAAAAAAEAESExQVFhcRCBkaGx + wdHw4SDx/9oACAEBAAE/IWVLpdDCbR/8HUhB8FEUYMoVi1iNrn4KzHEB8CKKpVAIBUJcz1AipQIU + EQYoITcai4I22RgqNYFID4BMGrhbipSqGGyYPxr4yzHEqvjW5Q7og3zB0jyfCmCGzqMI2wMTEoAz + eGOIs5gy30ZsBfE9TOSyUvLPA+B8PFKuOIs3DLowQQfAMIJpgXGkIQnwliOWoMcspJZN5pKuYY7O + VS8yqHLHUUK2EbI7mEpDuEjBrMa7YHwa2qJqGdSu8tHu7gdy4xsi1DMLKqIOYoShM5n3JT8jDL1P + ohdFXHf85PcBHMA3eWJHqQ9vxXwNS6i5jaOIqyv/AANpczTMKPxSuEAg5hVcBDUpCjCpLv8AEkLI + plN4jJIeIRSi05lyqYxLhcacywlRYGNAJ2/+FVlJqGT4F0IbEzBLKqhCYFsTHiYAyj4Mzmo4LqOg + EoSs5OpY80tpHo1B4Z+Jakvg+LzWbmosS8wmKYsPnGksnPwCBiJBpmsYvahKl/8AivB8P/gNfjyl + YjD41GfiqYQrEmooS9wI/Gg/GkNLFaal4PHcwzgEZ2y8wYkT3qCmmeaXIz6sOtty6lChtmmDZQ9Q + hiPxFYglEc5hcWWQgrNQ+AgRsLC/E4E8JxL+WZLiYRtRLgfMRMkUjmJn4HxbK0oUbPc2wEkNURZD + cFzaB8ZwKmPwau1lkVTFm8bbCWBVOaX0IvdYlCvEbS20WBbHGK4aTBlbocyl3PqWZl0uYVz/AODK + ASqwjgjmDp8zcCMX/wAT3GebMYD7qUPZh1A6tGLHI/HxFRivrcRd7DNwXcr5JvRV+5lwndfHDFtj + HojNFVymCNsDuPLYvEuM29eJeIJBU5hqWZlhLlxr4Zq5cZQZivFKgSkV1KVUNY5zEKKqZQZl2YQr + 5FmUtcekSNyrXusS+2V1FDMQSL9zahcKvgIfDlBXwWWYbgTbKojGcX41qXuDO+pU7aGOoTJIYlfh + 3FuPCRMoJt4diKZYXDsg8LfcH8N8/wABMS6+C5fkvQofSy5WD1AiCjS8k5k+chF2CzfUZuClpmMA + TJ2eJdV3ts/WMF4HPDFR4F6lts+HgiVEl/FcUf8Agyfgsm7EqEQELq6nJKnOoWwYKgfyzwiXK3OH + LIbwYEqxCDiRw0zD3X3EDEVLKo8JjSpLVKYWhuUS8TeFsd+DGOHwoymZ4yqjWlcRU8D7hzhbM9w6 + IY2HjxM4AvPMQIOSWw96IqpnCvMfw58oaso5dQAi/BzPoTFzBA6u+khh7f4Ztp0UnYnUvkfkqDLp + 97IpGKxlx6gAnRaX9ie4IqLQkbOsdzDYfiABTEF8R3/4mn42aSsygYjmWyykwCEpcrMJTS8yvmmN + cUI4iMNWlQHmI8kWWeUOkK98EW1PDK7k4l+5MI7ZdRQ4gxFfjrgjiKZwDaZKmOBDEtAlb/hSCLe2 + IZlrlTIe8DBG1m1RhxESsgcvmCx1PCaxvpVy0E9pgin5mkXechYzxzKdXpX/ACY0tDmFTyGvEojh + TfcNxrV6/wDkVxWGRp9ZsEZx5gAhclDGOzHSpFqmU9+yUXwis2IwYJzx8Klkb5YoruJEveZxDcyV + EcksumdzoMwEho88TUW4ry38LKFm+IyjUD7SZcW5JWStbngZNofwBZZmXI3+cqTRBFMkxzEXLinM + 4I63KsMk1sqqelcuFI/cvzI8TWQOXmBGJuyAdTTzaMoSikRbOMQIjgEFFILCUbLZeHF8QdScMB8e + GNMyUhbcK0lxxCj6AcTYaPHEKl1jRcTzW4P9Uo7S65qcoF57MS9wwFsmcJolznHACUGqJWnEyhBc + ymSKxdwBj4YYs6sdy5dLL0VQL9IpNqJyYDXME4l0VHUOV+MpUXmmBXEhmhyQykRle4dalIa+AEpN + aXw/aXqZysuUMbotfNzIFnalDzFDbbitN06hYtWYMvq08vUVZXN0BHdDzufvMPz39kv5Yt0bpF2W + 2zKaepQSbX7E5h1swnNyphjjh6lmrI4ri4Bq7X8kubjhgYX2xH/x6OJXAiwQWo2qMkmwVmYYxCy6 + alk3A2n1Rj1gWvM1uPM7R94C+5qsNyUPw1BNYhGDEEHT4EKXDNTfZcc5mpXL8DNnUYYgm8kCvVMt + 8HZ8DOSWZSwcVBNiDO7MxDAZkxLmTEE8cwYlyvcsNhIE6O+YCurz3LQuEm1qXftCOu5HUacAw6j3 + +pdr0H1zAMhcqesQbNkt+4S9FVByl56OPxLGV6PEHbM8cHmdtIMYxSwIyDORh04g51omCcgcZmDt + yyZwgrrmJA8mXS1RBmXFu2pQ412Ii5OoNtz1NJmNWHimACIGDUsmblIVmWWiZF2wMplcq+ChJ6N+ + AYuXGJmIgNsyYGcyr1xHMxpmDQN/N0xQvqNCAlmmEBwhTBHqGPNTNR0RLJlL8Ipmj0FDq5xOeYyV + 
fGG7i0pQhyEB6jCpphn20SlijSAPcNbix1EnhqEBjeWCxwjBBVamMw1bz2wQn+1HmUAarGFgZoeT + zGBNLzKtQax3E7PJPxQjqwDVvCZdrmK4WeowIy09T+ss1zgJEr6QMxJuWWh4JQTdPTLS8/UuFe4Y + hcx3cDFAyHwBs5lbwcJW3K3EwpNR4yrPMwIdEqO5pSyh1EvZyS5f4o3KkxzK1mXbCbI+JRVzNlGI + bM4HwuG5hLqVs2eoK1VK8s0XUVTyKWXnIT8SuSqaa/5Lls8ySVLuqHnUTJbK48RzZE9p3McqJkpQ + TkPG4ljVY/VlrRmB6qMHRgOAajWZcM7qHZAVP0i+p6gkZWLP+Ro51AUJRDPMwbYYw5g3Fh6Ki7Eh + 26qYww4GWAvZNhjzLlROElFLjtOUniG2UIY+OqRNkcGEZxAOYDNviCiuo7EMsMuNTIRuCsS2eP4q + iMkWIjhgUYhlFcPw0kpHklCW6im/xMJbCOtA/qNvPtnu9cvasn7/AOTGi4e4wP0koTU5fgcfRhHw + EaVcqrYUPUAVUIhFHMVIva9yE4aOsT9Akfp/cvZbCVOig5POJXAosRmbNdmpnZp3UfQ5UuYlR4mY + jA41PMXCnJuUEAUN4mRaszcOfjAII5cXU7W3ZDCMsFc1L4YjoNR2DTErbTQ7g7EkSPFKGaWfWXW9 + R4MoM1t5lvRBaakvBVJUJIbQbSXamGhNz4WdyjDLItcTSUMzbilK+Jl3UFh+SaZb5ZmxofmNKKL2 + 6jKxLUe042DAzIS3A8pKmGDDnXiPH9HiXUsECvMfp9oVa3xMsRshK4Yig6YQzJ3UZ9EBrzHWw2nP + 0hdf41VwBfLmo9TNeMwGSP0g2bTgpon1B0SF7eo0SqzEcXFe4MbBKwW5SiVU2in0gXmSnGB4YOGo + JoTggQJZZhLNI+Int8YNGD6YMhNyfKwPiOURO8mQzMWpcdfxZgsy7uAVJgXHESposCtzDEmWY0fd + h2P13EBjoNq8ExhnQxzjX4SezFjbN8mvMXvBm0P7ZjAGCGJSXv426cQY1BeTUJDy4iBBrJEBAtMp + OvCSuo74/cHbH1iAETUjDDQ3+iKCt4jG4PFx9nusuIpqsQDAhi1qaJxskuD8X76TOQyhNu4PD4MZ + kTZsiZaJhBi4IZ1+tT1aFyL8CpfihW+NtXcOcXiGpmdqVibGlx2QyYipXCKYqMGtQBcbmJaOYRNX + AwSiOpf4uXVTzg5hnZyV/ce1SC9Xv8Qiu7Oe4TQGVNzKC6xH9y07ql2O5SDdBj/GJgUWI6x+peyU + bXA1EYP3NhwNzrasjcM2ReLA+uRZkOIgWukOGrXuY0dJY7CnmWukTmDyOYoC9eJ9l8wBQ5cwYlw1 + LmoxbAWImMSl5mswRe2ok4ipmYy5QqsRgLXuZk35I8h6lyZizWbzFlCccEaiTUzbqWMDCjBeSYJe + 3wWfFgq5hmD+JeC4gszERZKCsEwjMo7h4wY4hcta/aXq2lPicV6nuWnBEYU3U6eYZXWt2schUunP + hBGtManiDxNat+otQimt4RE9JCqypzwFvcOo01La79E1DRag/fEc3BVyV3DhBzTOEhy4ipm/rBGR + 42mKMsWfiSPkttLiyVG9EduSCBk4l7UFozxLm7YZkm0alMnkmDIcyqXeu49fXCEPDDMSrm1hv4Rx + uIhGTEz8Lr4MEujzMj4KyahsSjUBUBF0SlHBdGAXNwjm1XzklO13tQCs4DvjfqXn2ziFyBq2Yl5D + dtzbovwZjpxz2puNgdsbmUoY6JS2PmTnDxULCJwuhtJrCJaKxB5gJD+YmvjAoupRYIxEuh/xNxMW + Gcpf7imr7Y/MBYHdafMOn/UIGUrc3xCS5qIHbMiKJpBQohOOfXxpOo7jUtqbLbBp3TBF3IBMFx7C + NlUvIKjVkohFx7SRyFWbCFSU9RWkZw/rLypZoilmDmWzeajmAQCmCMkFiMNPg+Uuufj0GJT4Tlix + UcH5F6hhb9JkeJcNge2XMqstwfQziPKt3+f4l8lXRR+UpECrZf0wkWWQq7Dvx+5ZlIMF6lCIu70Q + hYDBzcEvojj/AKljQ1DCUUO3lAJbNph2N0txYXBX5CX9QXf85X6jqBrYUf7xA/hkb+TC4l4T/EwO + bOuIxcOk0x+sumX5Garw+ZSdpdMQ3JU1JTZp4jBMOZUtY7MRxWnXwFgEqYWzdYgKrmVdIZ4hygmw + gOaSzSxhpKgLRzS7lZ2dErFnwEqKlMrHjblvxxGYUscwxLioiu0yQYqoOZSrKQu4sJOJFjSnP3OC + BRRTsMBEUiynNeAN/iWbE2CflTAQ8kED7UQHeQKJSgGGj1u/sRq5wv8A8t/txEX+oV6+0pi7QRUQ + MdLiN/WeGOaUW8jqOk4BeiD+oixr1KTm6Cv4lAK2xTcLf2g/3Eyh8wL7rMbwN/C1++/rcK1da0n4 + faU8jsJUAKH7zILj4L3xEVei25oqWsViNcMnE6B2IFOCH1+JQabljlUuXmY5jumGjuLdE8DHgO4L + WciPDU6Bm4SnJc2tcxPuDNMde8a4qp3IErTeQXwFoMFRFzAlr4RKqYEu4YmkNzU7YRJYiJE3ljkE + xUR4vUqq2dxLRvtmReWugebf6g4/rwfvBzS1yrZV7bi6JcShvno/3pQ3nZF5/sZ+0uppGD+lKLYC + xTT/AORoaUNu5f8AYYYJfMmBeZ1XoEJh8mwvn+/tAXtnL+h3X7mqo2FX7r9n1gGww8qGHjm5h014 + lYEAf64kXnvv3HZ/zDhSYzSP8MQDeKkaMcJMiBfzHMglfhA2yRld1Lk/pDPqYNbC2BMFx9cdZTo/ + eUGGyXvMFMDDLMIwLKltyjIQVh6l7Qh9liDDoXKy47hShlliY4mozBA5hVFTEVUoYr5MoRKlcybg + tEcG5bLFVMZ4O5bniP77fEShumDqXm2uV11/aM7A+j6TMqZ72x3YA+0TV1b+UEZeVglAb4B7dv6h + QWXYzDFE2diZBSwVgTCQGhVl9QqB9AwCZVQH8zAnteP9/tzMihwfuDBXHZxHFjOl/wADOfJh/ojU + CvA/RAda/K/qWL1ddxmlZ3kjqvtmSHF6GpfDJ1CKT7bjEWEwJedTLqrzKQ1nMWA1uBzBRMHidRSM + rQNkuZu4J1+Ei3UQ0y3JcDfCoGiKEyZTLkutVNnibRKC3cqiIVUbgjV8NhVcog4ikZVx8EawbRi5 + EpeGUTemkoJaNpUSXDMTDGqxDb0ZYm2LwJ13KhC+3V/zLvKcpz/yFgW53ULAW1rv+B4/+zc2n69f + 31LdtWlgpeiP91HXRt6H/wAZRHBLiWdxV3RgdRIyrXmJ1y34j3sKTPmBzQ6fuKw2VDy5jG7eK8+Y + iwhsX9vP5ER6auwVp7E1y14l3kG9ncdkGht1EKlI3UxcBzA1+725JWshntEGEO+YjWlcrU4gPJPE + 
uwZtY4uG3WbtgnBcSyP0j+4kuPMDMnvtIiLNcQuMEAQmIJS4koyQgN+Ic8uABgMD+NHyvWM0SpZ6 + iXqsckAnkgMEAcRWbhwt11HtMiWZ0VU5ZK1sN3MzMTO1OF+ILQzfOJmEctoXynPaBFrEsOOVGnXj + 0dS2R0wxRcU5zubi5KvlWD/fxAylZi3dmsfh/X3lS6H2Q00vP++kYU4D1QSg+pemCl86jNsalLPJ + PD+5c4Pj9wUVpcwLkOJQvJ7GIQVO2KAArL+4937fttbnscI3uJcno2zvVdwu6OIgLHhk8znHsQu4 + DQ3PFYVhHAeyM5jYKwEW1VRpgmjYqKw3KFQMU+w7hWDJwRhsmNWEjuXwEVS/RgFcTGM6+SLwiU2a + OSEFMYik4ySoaO4Zyt1Mh3MOKBKIlWoOI3Zf0gqXqGWopFyIrCFphIgNpqUuo3tXxDDqHeKo28RI + G/nEpQt74m0L7mQ/eu4WdYbtuf8AcQ6tMR5dxAYY6ntm2GwfUvyDFpPcLg2SUqHh94p2pcn4lCu+ + JQ3BAPabX3+ZaC2nJKc0htcjQ7hhUrzK49YBD7R0LB1MRm+6OWPtNmEcJA5jN35lSUbzEUMWopFK + OOyKIqzkmJM+WDUqSjLbZAKwIgynDMZdPUXlK2ZxdwwngZmOnITEGpZCG5zBO6WXTKivwGEYeEOu + MBEqleIBdiUNrDO8ib14hCL1xKyEm9StEv0Zg1ywvW8R6CpNFzkuXKoG1fxC7NRyOXULT8yruKEZ + FMa7gIxhvHL/AB95VHK0xA6EcERLVr7j+Yoi6NzJvywoVqpX1MyusUmKg4n1wjqO9C2epWa8YlQN + dEpwGMQMI8e4mG6c9kJjJ5JhhgKvBqaidGFtd8RP4WeTNwpwmJcK2WDaxjhJ5IN9oPiCpVzBrZxG + YLH/AFS7G7ydSuHJ3Kg5RrkmNhBmxPcLyImSbD4YnCJUOEq9ypZ9xwa1CsuHGomtGCyZmGqlKKMW + ICIUwBp3NwLZjzB04meG5QYHmFzYEpVmEKimx+AXgamXlUYnuMXYu5drVS1NJCNIaeEovO0MAA6q + J9JelMtyqla/38TERuHzMzrxGuuoocPEsThdRxqb19YTBjhmwBmmGgqpWt1Evs1Pf+/iZwxlHLee + oA+hmlI1rNTcrfshtu/gihTL2KlW7xOvM9vtGKAwB9UXYpPNzAB9IYNh+pzMm0Wqj7CG4jR0gkY4 + dMbC6ECx0jiUvb5gOp3RZU1lFhiVUqHZKzMfuGjuSvPyLZAigtvsTwVwaU3MXEumoo1LbhrBxi0y + zvXEtrqDBEZycQsFyVSxHVG2IUbOYHeVhqjDLJ5xCNV1ArEtBWVAmNFZT5AF/uKhUzJZQdwpUsNQ + ssBRDPvcYDRpD688B9JkNoRv6wQArUasriNKo2x2yoijoYY6NGbnlvmaRC2YhFsZlTGK0szToj1K + UYioplLEH2jHsnKB5I4tIw4X1BmOHQ76mFs1kgpappGpos1OWaVHr2hYrzGew5ErcVADMEYmeKiP + LMQuwtn9xDgczGmuZQ6sRLOpZuLfhUZAiwpLkMzHLMpbuX7JdyrbwjTO0QzmQVg4wKczrApoMzGy + bnXUWmj4Rrh0jrVhHcCVZK3ft5j6mz95UuV/j4UTWJRKl/TniLgG1vq4h9lRaqzNnncHtL+OIlGe + Jssde5l7V8fzG1UNZ2TgGY4R7Zl3gJ3QMqzuXlEFKPYwX6gDUXmdHiK/OklBXXc486xqKwpDVGOG + fhCaFxbn09zONvEO01h9TU4TcSU1zLk7lLGtruZpmokxvukBmIgpMI0iilaZqNmVJxWSC4ISVVnk + UriQHAcniZtZZkJAcj3KTcpWVstUTwzLqFOe2I7HPECrzK9imLZUFYgPDAbXe4Mw+jApsJgTVGYH + A4zT8eQCdtV/VKh2EW0Y7jEv4Cxej+ZRg6mmq6ah1MUBo5hSOSrEaUNLIe7V9E8zWAHETwL5gAKR + BhyViGmtVyzVT3A1HuGALId3O9mGatuyUNpuVkoJ7mpNFx7e8wsERwuCS1e7mdc1tiOHJhhReOJn + F1BR1fD5lLBY+PYG6CHeYm2gxBVpvCsHzQ20LVyl7czIviZuiJKXKuSy0qZ2IW5qZKdxpz8k0dh3 + A0YSWlUzDIWY7RcFK5gDpxeJtU+YAMMC8FQGoHqhPUuVgyPUbEAeSGCIkQFJZGYlb6vUBXLmyLeD + Dsalvf8AvzOhjbPuMuLmT+47TNFUf9+YKFScStexUWoS7LMzuCzsQrAubneuAs34zFByF1B5FlDS + V5nEF6WMW5DgYIgp4HEO2ON7iMuUTaKgrrrtMX0MkqPS4Z5II+cczY4lhQa9y4oWe66YrVxcsA1b + mIBwKJNjco4go84ZWjoirkhmcxK3K06lAqN1ep2I54ml3BurGXraEP0Z9dR5ODpFermW85jVKsYV + yotJkg4A1/M5OUA1eo8rUt046hqESpn8zXGX6fFE9ZeEuZgbhwitiiMVWMBaxRNhK9+UYAkdly6S + 8E1jrEXUGF7I9FniZJJS2n+dTK1/vEr1zYxnHECIV5wwMA+B/vDDr+vcWy9EZzRjx75jtFG7OSYe + 42QVSnuJ4FQAu1kTBv10x6WS7uDlK/NniLuhqa/6lMKnzErr9mI42tc3BAwajAirOGBXG/WMkyyp + 1BHWU4qotdcVJohNrqXBmttkmK0fJBMN3LYJZZAsTLx+DXSHtYruWO+UoqBfUEgw2FwzNsKxOJkT + BH/mc/UCbzB5RgK4CuNE8q+CaLvXmXps1U/uIaitXImT6OoXe9wGH8wvOc+Tr4EBUVn1GRdBXVXF + 5DMu49/icHmoqHqXUTC7jdBg0iA8zuIXK2ek7DrzxBUtdS3ZEKlfMLXv7KQWoXMHKY6ABeWrlrCY + R4TEsF6eYpKLoMcTK/FCrqxjGWy88SlNDwWnqF4LaQjZGfDMDiYfycR3iy2YSuIbHVQNtg2uESSE + pF45w1KJZgz3LKRLkw1HkB3Ct9GIM7lmSxTKDuckt3CuXZO2LDdPWeQdy4RfUw1Z+UcVZ7h4stip + StY5l2ODqB5FeEb5AqNZaqPAoeYhYcMFQPNZ9ltlPLzOqHM/cuRK9cmcTDGXkZIADkgiCyBh7WhF + fT4uGOu6/wB9Kj+8C06bjrbOQruZBzuBzJGtle89RBnE9lpwXM37xxLPPi9TxPsYBdjMjFZ/Mo/e + Y1b0kEB3LAG04mCdTMG/CUbNemrhyRiAj3L4g3W4r0DmXtCxas8viIwGKPB5gPMsxMQ2Y754/MuJ + zHkhtIguuLI7u6omYviG7mwjsfWaXlnAKjRq5hTSVrURoOFsBUSWOXPcr5NL3OZXszGER3DkNrNg + ioJrH2lywuBk+kQGEuIU56lqhiaF+cz6DqUQX1EmWFDe9QfDN1ESHQxj8X6JYO4aMPWLekfqQBDK + 
P3EGc19Z3Q4e3L+pcLvc3zT9Zjg+8EhxqWLdti4RDxK+efKWtouTmErDniAvjwJArvpf6g453uoO + m/MTjio0J95ZoMilLCbb0XNFSFQlrMlQC8jhhBAVJcrdRsc/eZx3qmX5poG6gdw7IVkmNJhKKrOi + crAEee1sOyqDKq+4hLup6nShhyCDCiKc9oRlb4jOGiA7ZkxNaSEQn1i3c5CGTFvJa+8b9BVOBy0j + qDBoG8LlI2TpKl4hZSbywqXVxdKbTMolB5JocowtXJNr6J2cTJKDQljai4VBcpklZcj1D7vEFaW2 + FFz8eGmWUnsxHY7l4aSzr6PMYa3J+03yzNbFMWFQfY+iaHF5xByLvFxoMHKNxT6vcDlNf79QMDtc + G4GDXnMtiqllHv7Qq1cMibmfIPiCD1MAmb9xjiNYTZW+yLLo4dQbZU0+0Le7H8y8rGtcwb16D37g + diUEBM0+YAZFzmoto8XcW9nC5mSKtFwk/NDrwG4bL8IdgjLGF49IO3xAKSGNRKE2AIY3RMZlovTL + ZGQ6NFk2kdnE7SppmJSF7IFmqEtduTOAngn0oE/iNWscwRag35lA7rywkvDm+5YZnmplUmMQ15cS + gpABOp4olpqxcrCgoEuflKr0wt+pa9qlT2iO4cSmlI9yPlh5vaJWJ9OJi8l1TDQYTatMSwbOXmN1 + 27mAfcWbDPXUoC1VFlpLU3u5dNyg18PXgXFBF3VGo5XMCOPEv2rK6vpEu9Z3uKPIWW4H54rkPBI7 + htWu4z/sxHeoPfTOKmskDWJbcGKSCUUL4JlATD0mCPRCqFnJE5SjuAFQ1mDbqW05zMDcPJGbVymL + Ms2ywO0tyDcTBFpwwS9ZxrWIRvs6hFMDvmDd/auIevTncDhxttcN/RVmIl6Oe/hwR8z7wzn6/JiF + 9H3hfUYjvOT8TCTmZ15KhEK9S8eJQetSkG+LZeoMccfdM+xQVXriGLwsYq3PDUqmVanC33zMHOlN + uKiXTPiO8Dy/xH7AgiHqOwlKbeuYoCZsxF4axBE+JhceY8+N09Sg1+4WriMil6KhBpXGZYiWko4M + vDJ40+proPHc5wlr+WC445qLZK5mPulmsq7FDF3gRxkumT6R3Yc5TUOFPPmVNVAHXdsoUd3xxA4C + a47CYGPAmV7zkTNmG7i/+X+yLHK6NZh+EUZdMswWGUGEzagmE8pSupyAc4OJkFud5Jg885nVMSzQ + j4j6IzuW20MimZXMqVZgmGDuG1ygjJq5QaCqMP5cwKB9zlvhwTy1wyh6EPmqcdSmWUMtoeGEjt2u + +kt3P+xqcOJlAb8xRH9SUpfuic2ugwx84riYfi1axiZCC9sZ6/7/AOSjFqdHUul0ZLZq9mLJ4qXA + xHBio79TFeJBdad1KDfyQ7pUuqYJrxc+qacQnGbm4j94gFK+0Qbgg3Vj+IsHRMEW9zWQGLi7g6UX + RaFdwcEpCgNB7yhTJZlqUMMu0z/cauW8bJUZWgYH0gxbPjb7Sl6LGAn8RVVqZoEt6p+SXqA8MsCA + qblmqJ6moE4Ov+zFClDVZ1ABZUA33RV21MG+FtpSUFXlqG28rYrBvOecoWYA57id66H8wWT71xtZ + GWmCICVrmL5gdLgHnsxaF7K1AfFVuHj4r7zTb5uXWE0BrzMrq/biCmujm4mnT7xicTbs3Uz/ALgC + XaQhgJ9oBQZ4Z0mE5OIO7Sg8sU+eM7lwmHnEOUMal3N0yS8qcle4O+88xwRyRT98TuTAvKJrM4mK + ytin/sppUylgzFe+Dz+SJwJsvMWET2mAu9drmMFGDZ9Z4yj8YiRGyjNzeLlOKlhbB5mrxqF0xtNw + tJtOAmanpHhg4yRHBaRdf8IXgExdGY0CvuXCpVNOFTKtH1Yg8C4DcyA2Mzcm3TKUpbahxHYtOdRD + KUu6hBcy77got00a6/EwFKazGaLTJmY1XsqmVIHqbC9RX0t+uYAWssY469R2vvWohwDzUBmC3Muf + B2uIxm12+szHL3Ay3CmzEdscIDRNwZx94vIr8y4XtOK8aYyi8zwlDlXPUd8S5jNykozldTbgPW4C + 24ZzBaPif9iOdFDdfRKKp54mTj3Bgobvcxq2/iOS1F5uMKs+IbxJc/AgEUXD5e42+GY7lDqGVf4i + 5WrdNXDMVRVD6RCs1+Ep8Th/EQ2cYvUXDsM2mmWBRme5nU10S7k4YQo0zC1FrivVyj7C5vVjuAOu + tJiQoveCEqi5+syebMF/hjPHZLqI6A7tm/8AEVCaVUIhMdFKKTpLDFxMBr75i5JnArxOnRjGogQM + y7emYW7N55jjc+XcA/YhdA07pJUOJ1NMx/owu3d3yjaR9YuI5P4gcV93h6S1Zd7lluDdTNuM4dQn + Z4rMSnk6GMlJq64lKFab2xWji79/Ey8XM48QyVnOpfNUGBijVQZBSs4fxDtHOYqlDMGBA3QsIAGI + mLhGjb3KzmXEV5PEKWUc6ljs4sRhgv7S8tRiA02QKLjT0HqMK7CnP/IYLrzZYaFoxffU6lO0feMr + zxqh/JMjA4MTKpZqUaLOtSmZliQYpd4TC/rMeDgKphVtOLYDp3KX2A/iYfFB2oV6wPEyLLbgdHLv + DAAKCkFsjGLB9ZkCl23RE+l0JNFOjIckOxvOkzAn3gCM1bzvUsqsJp+IrsF9j1iVzYeVv3hCm7Uw + zQF6Op3EWhOjw6lSwFEArv8A2JYs8as4/wCQENBxM86TdRwWjey5ghj6MsFdtheYNdncyPfMQaW0 + xTdvF4mBhvTcvZx0IEkeBL4gWUrvmYFl0Zj3OXqV7V/jKBDaYCpq3AQRqpWy8TDI3BZNYJYatds5 + +YgFypgCZMm8y1ZjHEALp7zM9LEdt6LXBa4Y1fWKEAcbX6+I+x8iZW+frDSjPcwocE6hV4lVSm5W + 7nv+EogPBh5TfBWJVLRM2iykg3x9oUAOm0ckT743dkSizOxIB1BuXSTeyIjyv93DrBpqqhaeH35h + SquGsYiel0EGQxLN5JScuCjPuJvreAyzly4BzCUouAIwKOTt9xrWEOc0QQpgfX8zSxhl6meA3f8A + zUzrTzTjQxYsTNmX6tQfaGAFcl0fjUxa1KqxKn6QagNdCKZ3iLFsfd9oP7FMdxvNdudEwQ5qKgan + d8PEops3bz9Iqoz04Z1KFTqO6ji2+qKPY8IXYpXNcTWkYcoVxxc0sb00JWJYFDnuAWl29xsD0mPZ + NWgt3iHD2pVY9MGM3Y4ZdTnT9JRwGZ18kejZgaKo1ComB9RPWr9Hc7g4hCN3X5mkK9RUX6dwy2OX + uCQ5/accPuvfcYWgU1WBA/kOZVBGcV5ZhZXiXP2fvGV257iN1Y7SywtXLDdsxqWS4MyljHMUlaG8 + YKlBf8JmQDU5+h/UftLZvZDAfaA66L3zBhsNVeY1CXybiOXA2LYoyxug/qUcP28QKrsYdRCwJdIx + 
CqAG63MluZs3KNhTlVW/8hHViPEUjMkr6oHwK6dwRy0pYDuOQH5RWdZ0OIqjzeU9TKZeFuMdPMuj + E7PUrtFYZX9pfJZyTk4AoDdxWU0Grm88rheosTPjfllojhVvmXZ+79cZg+0fqv4hu3i2ohLeoc0g + sqANC3Uqci5WSPqOjgJYKwti0jPfiobmGFhCvcRMdOcOoTmlRLxmOAFU8TfZHLHY7IHlnOCr9ycS + SVLxqVkznmYri3l9YLz2Mnnw+JdZo9CJuE9hBYtOMcS17wVDjzOod29xreeXiAktczLwuG/lKe1p + K3sdPEa6OpfadVm+4qRcyvSx59zMrbUFDjuAOTcW0OJsbO4izG4xuKAe3Eo8mCrcrABwpywor2OZ + lWml157llllfWpSJvwTIuOxuWdFO1MCtubZZCA6zCHmpA/y5utptvEY9sHXE6gdSgyvgq7hCwFnC + iBQyBeWJ4QY0dxzyWFo2q+pjpaY5jdJOXlPSxVoeVWsn++s18wS2T6EbcHQJXgoIfWRYzEy7Cz1c + qDq9ktgAV1+f5IFfRpXqXik/VUMbpZZUP4mQ06QCysdcSwCGO2YHLyZn0BLfTtirSqrymi9w4jNM + a0sMXvR4bh1M6shWbxNBqCTsacky/ijUzBCkDzUMXRfhFg7LDKThWnZK3n903ErLG6cwWTuZBFZE + ycyks5H9Sl01Gh0mV6gMghz3WXipVl0ioZg6rqbVwCP+0cy8VHqn7z3owcMLpHrFRqzBVoB7S7M6 + +5Ay8Z4jLshhu5WcjZeoF4gwoUWccXiIGiuEAyFRsM5VnmcIpx34ixu0cy0VjWkOhve4DyHOD8yq + W/VUU0q2teZQaePJ+kz3Sr5zQUvkXmJHnxLDKj8kUBJ45RA2ars4z0w4cMtGcywoCAgoZbF2JSwd + /wAfaNtIX5OIqaDxEy010SJQgq+wlNmF48JSAewcTzawlfBY+64DO27dzMyrInME15Q1EpxCrSpo + 1pTsA8S6rnB3MACuLZaXD03GrleLl06j7witdsNQvU48JG5r2xDRtgcwqdPGX3Lr/gEQTaIAdi4F + UWleHmAmj25lI1yORR3EHnl3LJ3zNv6igUi+rzODxjCiqCbVOFjahstCol0pjIVucRL6isQq3b0H + MzZxrwmFVwf9hUgLNK2e4IRFnKS3D9FRF5daMWFL+pt3XDVQZVPaZg7AUpoUeHAxYT6DiW41sP8A + CIKHsdeoGgL7ERJXLBZCmiVzbNzpGNcxFul+kv0d7IlThfZmGgssJ+kwLz/CZYF53cBXnU45mda4 + ZxEA4aEr+IctWUL9xFi1trTuYsfQuKlx7WsxgYL9kMQVKsaanDdixafU0t1XdxpyekIvK2cPP9TG + Dy5IVsOY4nSumfMQylObH9JQgplcjM6CjFMHOtg8TUF4sXAvMvOTJi5mkAuhWjd/SOcVjmYKTqki + hjVCv5iesGPUOKhAaftiNoWOS+88TQTTsp/9mw7DNdstabOpsCwtriBeY5fxMKzAcaip2z1KsvTz + zL7s95zAOG2Y5XnlhqmD3NQhFkUacg9QXtd+eIyYDlY9BaGRqZ4ws+IGSN07lqJN1KzbN3AgvsR6 + kZLcxdmWsX4jsw8q7m3zGK4B9SXNGhcsQRUHhYBscK7QG1PMEUxbzcaxoFai03A3OWYOTf4loaEe + kq/wTKBjEGqzoI3MdIHUeRK/SYzydVsZnFnxcRW23cogFpclscC9l1c1uZ1Y9wSrLx1gQxG7/uWR + vI49QNVrV8XBm0nFOZR7NZqjqJSuHARij6Dgy92RkPcXPOXG6lfGeBDoDw8+ZjNPcJFg4liIDMCO + YBXUVmWFpgrM/wA1EpvF9zwp3mA5FNXbK4seB1FZZvHkiW/xmDUCws/mKsh32EwFK6y0MKBFuziI + o7CDORZXfMF5UclRhgvlmuuScVi+4OnGLggwxO0rloziPM1m/EwzwfaZJbOCAqoDkUxardBmF8IJ + eeJvGscxK0cO5sduAU+8NyKuCzsdvUPMTUrjZ3Lc12DxKjYG8uYIavG4kVjeV3cRVziy6JZRrPMo + 7t4AlWyCsCeazMmvtUB1xCmIpYJxG6ZezmJc/hgPO61Bk8WszJq9qZhirl9sKNdjeBq15s3K2Oh9 + QTIAPwjgvYUcSvd0rDCor8Qd5gcdnERUG7zBXB3gysdPMCwbErgepUwNLnYHl2sv3nFuDL9pZrLa + 33LoqoLQdfqNohWyJassM0p/Ty8hp9XAQCqmMBXiPSG4mNlTxyiV1P/aAAwDAQACAAMAAAAQ2Bka + WXEZ5b5eSoPlcDFHnEHIyQ47GQq06my+CcZEkt/966GCHNKgZEcmv9YOInA8CxWQueOwMAQSWN5Y + 9Z84PAYsTa1cVNZkYeATdhTJWfIjBzHz87GvcAPrfEzONH7gZ1PuQnKxdxaTclK1r542LvClcclH + oBhrcbc7jPR+EvsKvVrOJL772RSzC3BXfj04YEBK8kf11C7CSQX2fo00ZiUuNBEDKyHBZftpkGsM + KFpi9yl+OaOoqAdStnD03qDUqekuGIDVZmOyGZZ2YqRaAdoQfKIs3rRaEUihkhndyItFjrGvmt5/ + kp/tUuhbkkih9EnLkNDGEkmjuGQ9HxeVIqsHtvov4FDBsPsdsTkMCSLjhYJ8fFpqnEnC67IdBQDw + a0TZLEStfTfO9K3P34lSrwflhOfaC79GpcafVEIFa0bzDPwzFe+wKY6hxSHnraFeflZ4YeZmyCw9 + g5bUNYeaQTCOeN+bZA9GvJOuzNlEZi78Q8nyhqhFXECrKT6D8HTBnKKlh6C88y/nWrGFCY1DkLZP + fRZ7KnD3AXNIBIm/D8/jd3Hc2etm0yJFprXCxjVFw1EFU0ETyBxDbSB4H6EXtQOdwhp396dsDuPa + P2bG42QyHeDWjTUSYnBUCrx/WdfTKebnWPinVTLavq8pAQ8zSlq+wdSEKtdeJqpOn7Wkadnkmcgt + D16eCZ0wFqa+j54rfyG9sOUBhtpO9sXWxF3JIbHURpJJw+gXaghmWe8LhPW1Is2NNMs4AJX87WNh + aBy5RqZU00OooEW76FJJBPOWfKZmFJQt1z8k7syEuzIUibqf1KpqHJy9VWQMGdGzLH4NRYMeX4X1 + fRoSBexaVyKe7zPZRZu0+NgAYEQZRBOx82htVctSPDSsTaD0rVVBFplIb2z23E6AyyxXX0dXiO5R + 7R+9LQKXY+v1OIPHQqhGLRbUPBK223PIKCdXrA2sGhur6Tze9l/ZoRU0nLIuttbx6NMu9bc3OoHZ + sj+AB7yzHSVUZE/fPwcLNksMOYSJYQWkyWjJnNyCyioTIcMAzBVkfMkIh0KNRYc+F24tLoCPxw0b + 0VaTMA3lYTXcYakRlg4LKGUxLhap5zltHoZ5rM48dT32YteWPbaywJMfQaDXIq7zrQhQi6M3jts6 + 
6Fhly69nsm7+WTT6Dsp7J8JvfHvShEb1HOtbzcrP7yzQErFap5QSShA9a7D7WUbYgacoTwrnjpUm + 0zNij6/fcv8AihbKEhKJHAuKTuHbIlxL6Nk648kxdJcDQ0fohZM5sDBB4uMQ73Ol+r/Tj9SUwZKV + KKJ6N+adoVC/pC9SNbUnoHPvTeB4ZTZ5jyjAAcIBgY1sL/pPHaCveeKaRJTjYS1Xxe0L7lE1uwIz + Akn+vzbxbjxo+EMLlhgFS9AjjDw5jOV80zzp7W7otxhsbG4FuN6uvL/DRc60p92VWFKa4X+5P7Tn + BnMrCu+McslNkq0OWktdyWMKSdyl5SUx7gq3rbwHtYv+DWi9kuMK7DBVO+OkBpSWDg7LpbWN8Ije + h+p5ZO6iG7rDqk96twD8m6Fkz0m++H10W2O/bDkStYwaDCABswn/ABJLUbHI4vvIHfS1mfpPAXzj + Hnsjp920DL22KawdQgDFdmGxPasaSS0QqVL0DsGODpKZUkBVI0xXqVQ5tCIU5RrQ15UUrgjox0u7 + cTHOn4Qjy4jtfMyqXXxCK2m8iv4ZvD8kFuicbmcUBg8gE/mcxsApJZ8EfLCSARVpCWG6gnRuhEqL + KJq/o2pa1YRt41EGOJzd42TBRGgc0LFRMgZHZylYizygTITuj6daNJHvhIrgmaumCvnIk0ZvBZaC + kx/rKk7L903rQrRZoZrWnehRp2ryo7fVFu6+VKdUt/xWtQ3on8ePLLWp7iQXaRbS+0QJnsIpGrMi + fAaH4ykmmPLptuB1N4ZKAMqxDAXn3RLObgJbovy1LM0w3DUGkc2xdBJRn+bSuQ//xAAhEQADAAMB + AQEBAQEBAQAAAAAAAREQITFBIFFhMHGBof/aAAgBAwEBPxB5Yyl+Xl5ZBdEMYxiQg2N5RWIIenMI + cKhsTvyUY1v4o2LwJnRMp8azDjCEtDWGmaUuGPNEyjeKUuIPCGX4oxsWELDGh/CwiaH3EEoeiHHj + mErwhzonngst6xKPRRIxITXB8KN7xcwQx/T+qMRfkstHBCFhjRDmUT5LK4MoxKiw3uYTFsjNlKhu + iWxI8IJrNfS4SotE/wAJhCGiEIJfCw0TE+GP4MaINDWUsvbJlD7nos0WVwUIQYlRL4a+UqNOCzS4 + fyhMpS/K/eAv4ja+YQ3lYawsPEukJvg0YXfp4sLWIYs0o1sX6JvRBsZBcx4XD5mlLS6hSlHh/LWE + vtdGJC0eLOdzCHMCf1j8g1sQ6sITKOeI0aG7KESC+Gt/D3hczB/CEOML9Hw8xxj00j0iGxYSzM3/ + ABXTrGXCGgSnboQ2j0goeIqOsPnBNfpVbVG7+D9lOhpDJQe8QXRYTw1hsQxHGJhi4TLVQ010b9xY + NSkJtFpRDY1KSiInhYQ8v4pS5uCexYaJhaOsDaQj6LXSTa0hBRsBJp0J1suha6QlEejex4J7E9Da + /wAgxKk+EPY4KLaEhqkmFlMnXzpcspcLDExspSiOMTwgkE3MSbPbNEEttjRWxMlpicnotPCbTgg3 + JhorJdj4Ir8JjuGQWW/g/iYSpYLCYQ2JmvRN6Em2MrGUo83MEcKUWGsUTHhMSI4L9ZXFwexsf+0T + JVl1DbaHpTYaNiu/wqe0MmbHCpTbxKjgjRDQ3hRPLdwmNjyy+FgnrCyxv4U4QOTQx/CzCZo8J4g0 + TD3RoW2xWCEh/Rz2LuhPRBUK9ejZqZ1sWC09Dx/wa1gi6MT2PCWhqjYsReC0IcLoglDhwJ0gxvHg + hfDXyulG9lGxiJ9P4g0UuUzHbsVPQl0NoN4fgTiiKWJNCUSo/wBGq7+kiY3ukExwGicKbGiYhwNl + YYmVIqfBNDY8dFwUWnsa0IbDcKSohxCKJIY8tURPsmUbGLFKWm+4aqH7HpVskJsVqi9IWmGYtD/B + uLReCKwcsTqh9BmgaqMUto0KFRUVI0aINYSE0URZh7EG5oolSYmb79EuU+EXC+H8oZMLWNEbCg2s + ZtJDnVwUlGNFA1We4NYQf9FXaWDSSWiWjiDQ2yTpRYPhxvDcQmoMQ0hifF0PC6EMl0JMuTQhw6It + w8Jj+V8JEHhDylsSmPpY4IQhRDdP4Y4REIbZXR7C9mIalFIbLY2LSEz1IJ+Hnw7Fgh7w+/LTEoP4 + MIcdNyio2djJvBNfDJnSHhMXzR5bKJUlIaxvQ+nRfRoiCa/TQ1RMIYnppqj8D1iPcUhrZBIbXDpD + Yn8JaGhrXwkS40xajYwuibOsEz3LJmbITCJv4rGXDRsaEhIWkMuhdGrh4JFEkJ6olBDEPCUUNDo9 + wxPRS3oseGLeEzwSzXCDUHghF2NISpBQa3TwXBui0N4aeWIWxZNCRTuO5YkWYRMdD5jwWfRE06KJ + QaIWXh9FIdZ0XCEXRmiE1hbJ8IQ+YaEQg0I6QWU2PKGsw8E4TLIeYQeG9iY0nseGxIVYYj0TGoJe + ktqnohl1ko41BMg6aSwlSYS4aolOiOiIPEEIao0TBj4JFZYX1EBJ6N7OkEPEuCGszE0QgzmWsJhD + 6eDCJho44J/o9GjozdbHItkEKKP8yqbiHpxeDrQza2cxBRHTQyjFOCYipoa9wsUXw0eDEhGxJpmy + 2aGaTIY9C+EGsQaJhJiRKQaw4XNZsZKWWUaGsbguFiNm2g96hqxmvgvQdvbN/hcUY0QScP8ApCsr + Qp0emLZDzFmKdIQ4OkIP8CZdEjEmG6sEvlreITEJhiEJcM8+OhFGQ0Uf6y/hSpDaw2wpRnfpSl+G + xk2RBsTFGPgm10TpwbEhjEMSIIMpRiX5ifdIxNZNQgxMPCWZklFh3vxMz8Hogxwjw792yHggxLdE + o9CxfwfoUhVHREy7mlHs4xixTg2xYUrGhohRBo6JDUwRA1MTDx0Pkxv6g3i4YsP4gkfpDbjROBPA + 6ENmqJtoab8I7CXUL8T+I2Q1DhROsfRtItyx4eOMJm2Jfo/SI8I2cYwkOFQ/4Nmww16MxCPMOsT9 + x3MFhv5TzCCwlS2/grUIh7Itpo9EJ3Q7I9YxH4K+jHEGipEHFQ3csTGJj2hC5ofC1CZdDEWFLBol + YyoITMQ/wNnis3+ShvBrYtODQ8UpS/4sTw1iEwiFo/8AQJtdGkEycY1RaWyBUPweUPZ4WVDTXROC + eEMuhsZs4dlqEoiHkxF2NU/ojw8bEiC+H9MXo/MT8YtIev8ACHv0hsSsSIccLmfgtkGqyRhpkOif + RCkx6KKobIQnijWhLCRKcExDZRD4U/o38Lo8rDom+PKxMaJLBfw7ofykaKXHC4ol6NwSpu0aZg0V + k9KLMWGo1wE6LRO4VDh7JC5ehJfBCyinRVDebi4XxPilFbYyGhh62Soa/wA7NCQ3Fkv0/T+hrDGh + ODVK2FRSkBrwaqYQmfw2H2Dg9Baw0e4o4yCwl+Yg1oV4Vfcyn+nRfhMUSbcRBoaOmiqGKUf+W0Jb + 
OI2IgJdiw+HctQ4OkJ0KsciZM6G0PTo3ujI/4IM/4IsJ62aKUqE7hD08J5gtL4fzzoyCdWZKjRIf + QgkeU7oemP8AxSGrELRsLHvLOPlCEJsKjEwNoYujcNRwaJGMeDFSYQgqVicHvZHl/kok/wAr8Ibj + ctxQ5hM22W/FxzCQ3M7YPZiaYhnseHrKY1Rxw4HsaMYbEyxiKNqDaQtIMTjyspIcF8dEkJrwfzr5 + hwpSvBNMc8GcP/rKdUF9o/g+4T9G1wL+iTHRtHFJhkGN+n6KYZcHhiYqRsrcLbIQ5iEh/jC0i0uF + DU0c+GszExcS4Wo3Gmig1RqYe1hOCCeUPCPMJUcczRPpC4UChDHh6LB9FDg7qLClOMcWaIergjah + uCY2hq8F+sN/vx5lIaxS4bFi4hqFogkdj1sQay/3H4GoxbGLKQ3RIb/MpDMQWhNoeGejY1ofBs8S + 20Ue0J1bw8KUSYSKDrgynqNzYtC2L+CGqcbE9lg2nwrQ9Cl/S/gx5fyuiUPcMUUT1BMfwnBE0Jjy + hiVG8RBUJ8MDUeCYzweLs0EwggtM4PotjwuiciLRgtdRzB7Df4JaEM7ookRHvw4TQkXFzENEFwSz + LsdjGjEy3L/cLhwfMJFmExBJdHBsFoUyzaxwbG4MN1FT0x6wggmuYWs9F2JhrIFhicEmX9L+HRIJ + Y6MSGssq+kX5GTwgpB7x4P4JnTzDwncIdLEcTdIsUo8IaMYbQ2J46sp+Mevhi0hdPZjRY9ESGmJn + TGpibEn5h7CHD3FKNncvVIe/rTAEtw9+Viz+CB/IaSWsEkvhD+fRtDZb8NsSYX7I9JGg0UG/BJti + 1pvYTBN+lNMcvWGeYgzo+ienjcx0mOkHhDxsmelxANtGdEdpCC7m0V6HAOkJldHm/D2jcPF+FtCB + MkaL+44W9ErobxYT0NXxHZUyu0IRiMGEm1HiVE2QhxFPxhPFFDTEyiJj+nsMvpENCLRDaekN4F4v + uO/MLm4Y9IZcXK9f01Ra0UYnBalJjddGRjRWtCF+fTS+Nn/R/wAEz0gkQeEIX9IyDQiagsGEiIpR + DaEignSYnzRsTKXN2Np4oQSIiDRQpbi5YxNMhBNog8c6KMa0LhSlxWimqbkKUeH0/wCCnpCTmUIR + DwGyBoxAiEyRYO8TWaOCUblZcWDCfpstGg8wSxRo3hSjeJ+YmFzM3RI9fLQxP9NlePcMlETCPT+G + 3oepCwzWBv4Nn0asnw4zsKJW+CKU2MZJUbPCZMbMTENen9DsuiopSivHhwp0g0Njz5laKaw0ZOBI + fRCU/wCDwulOlF8dFjRfCnDkMUppGPp4GxcXDp6Ext0URUXYmPFHMPCZNjSTNjTIV8xCI/4T52Jt + dG9fDOZezQhyiY9jc0dJj081lC4dKdNFKXC2fw0wcew2N1FA+xKvHuUXRJCYuU0QxTZE2F8f8KaH + S41lKqyDTQsQel8Nwp8P6Eek3h/pRMSFo9Z+hPoX5htoT/R7PREHjgyn8wlqFHjIRgVKDbJaHeMf + weGsJcIXcE6IqQQ1eU8MVw1jbFjTIJwqw0LEJliawtq/LX4I5hng7wJrTP6PK1h5gm+P5Y02KHWN + pobjo70UojvyajwnMitG3EIOEImFpjPPhf5/8xcP0UL/ABTF+EiX/Be3MeKNlt4ehuGxJ4sZN4WH + zKHJSwZ1hCLhGw9D2J7OiojpdT6/7hr4hz/FOmszDPBC+4JdioPTmF+4Z6IQ+/FL8TZvzEF8paGJ + 4aEoJCy8r/Jr5p3MQ9KiNCTeKqbIXRHg/lvHYb2QWGenvzcrEzr6hPhYhF8PKZ3Pvy3hZ58f9KpR + L1lrLrEG8LBHFDuj+FKhcHh+g3W3j//EACYRAQEBAAMAAwEBAQACAgMAAAEAERAhMSBBUWEwcYGh + QPCRwfH/2gAIAQIBAT8QLZca5VrDD3OGWeBi8ZZBGEszJwFIJ2u8quWPfAZ3YekqWbxTie425to5 + JwrdL7E67u+6oQvVqKh8Wt2P4sd9hvqMb1CQTpDrgWhdHuWeSes3kb2L6TnHRnrDnRb+/AFPGWRK + JvCy2xZeW2yy4yCHJdmCDgTMdMZ6Edkc7TN9chlvJeHDbsURiO872kcCaC73D7iXsiH0I0WRZc3y + E3qDZ6lMup7YENf1DMAvch6utxd4YnQttmbINsyeBhjhm2XBd42ZJiOM3gmQy4PDDOyxyzbOFyCD + OSG8U7lOHbo2KXXXgubBMkb8tPUP7j2lVnfC9j7IL3aInVPqGOjIp9Z+qP1kW9CJwc5PB5bxlkOG + ScHSLfi2WQcbw2fAyyyVbLl6uHyWwFyw4SHU/OKd7fTEjwsGCOF1KwHtkbebJ3hAE2QydCQfbF0c + ugLN/wDintPko9cE4mDLMLLIIcms2eQtzWz9EMezjIyXrnbPgHwS6WEPXAbMzm7pdzXntBhDqcH7 + ekj5dUMZa9X1BdQYZe+TluQj5wuEdrpLcScAEs6eD/3EbG6nl3Laythth2FlnL8MiLOG+whltmBf + CPuJXAjkx3Gn8gLFqQii+viOcYTL3w71sNlkuE3q6aIL1JDSc9Sds3vePMOrWGXjux+SYcG6y3X6 + rz5be8A7JYI4G3kmRZFke/B7vuLSBXqWTvWsBYQPvZ1+r1+QH9g/cfQ9keoZy2Hc8i7sA2estgif + JrwOlmyBy/ol5sr02VGNo6kOsLB5bB+2Wcd8lQ0Nb9E/+owMJbxjfruyJgksghlt5CMW23SO2/BZ + d5CLG3qDseSHC/mWg77dkcLoDL7PScdOm1tgXccrouElt9xp2yM2UZw7eIjju29F34feF2zY/JB2 + GOr66hE9rRgcofb64zhvEaL8pnAWdSTFsxmxbES5bewZZ9PGl+d7Y9j0IR3MjmBKQPqDey65O7cv + +BdOoh8h4D1IXr6um2xOktH6TLDLrgZH7Qj5DTivcHA5wHV52zSk6xh0juSHCBx522Evbou19rLp + LeBCeBvLwXsJiM2Qc6R63p3PbhH/AGMYm+7KOHhZ3TQdn/mUxj0Mh9hH+rE7WZJ3LbtiSaQTctLu + VgGafINTB2ViDvwvk/Rfd5bLTgT3qyGTHcHHjKHdtCR58e3U9s8WuMcxmMvltvBwwXS2Huf3PSe9 + sW22MCQ1sDqX7Fp64ys07k/9oFZz2eQmK8dT0gJ3YP7EWV9WrpYN1NuxsA1Zi4SssQxDBt3aXZqw + wcLJIeT2wtvYMeGV7BznCwnqT0SIhvc6jMEllt64QdXbu6Xa+pepU7bciA1vMFq2T2TrC6RtbvD3 + ZZTf/v8A5l1UjzLcAfq3JYC1/LNbI2aXbJn1NbAxkYM4ALOATuToLDyy6sb2QJSdWHLY74fRD3By + OQI75b6g13H4YOWWy9yfq1QNlkuF2xw6nUctvEW6S4Rm3dgu/UQiAr2NgWSN17OmwJ7j9g17u9nh + 
rCaXk2OOGMhjt0dl2/iH9nbh3x3dSP3YmNndgHbTrE8RHcmTdkW/CO+Axw8rjztsdz13GreN4zWL + peNgTj2ZrI2Fj23q8z4DgCOQ2dM2RWb9q911lE+8+Qt4vrPy+gs2XY17tET0W04S/i8M9cJA6hrE + 8eLnt2YasA+Rpa/V5gZGr3f9lwhsnt5FuXvkLxDdLpzkdS1lgcp3edzF20TJMdTiVthYWxZBySPF + 1j9RdLLS09ljMyzvllLdurpY8ITtP4yMTJWuOlnBJpgjsyT3GO3VeqUeO+MhujMyTyRnUYkkkXqD + Ld4JI33saeXsOWfSM0smzrbWWHfOXewSXSUd2iVy7OHptjUkMdzo7s7uQiL7CCRym9Q1U7+WMOR4 + YlnV2OQ/cikzgSWhpayGDNtnLD7l9IeuASOmCmkKtnDF56jtjfdq/FjLLTOBx6htGahvu+udveMZ + KnAe92vYGkH7idEeu7HH7IS6I+kA2fAMCyznI/pI6qTHMl2YJi+p/bBGH5ZxlluZEPV37ukGcADZ + HS14HZqcbjw8PJT4x+xFoezgmwhlxsekmMgBhZK7kPecbMWTmSsj9WBNu2CGIKyY+yPqtMsTnUm0 + Obrza6yPwbb2SR3usdm3g9j8jy79fUOFvLMT/YA+zAXiJOhds97ojpn9IFht4YP1Ck6l3IHVp7YN + mcS0sjgJO9nl8yxP5w6WP2kfUiMGVmMxkDLVbiyNNbt7gSQ6gB1EfHZgcY8sJO7LInTsvu3vC89S + QjZRkoCW4jOiHB4DuIz+Iftp1MPUOkJ0yZOQ+DYbudTrZ7D/AHCO4bPuj7wTfUlnwy3uENQdSjpC + JdsAw4EHs75MHUk6vUxXNPLycDFs8ex0gMCAwlmjiZH7WnDtgx37jFEOk/yDh274Lmk7X5yDb2KL + G/BbySHd9cDuTXbUdJuhbHu9sjSI1Hc7JCIh+2gOQz2Ye7ct2eM643IC2CO5eIywnvk8GFhZ9kH7 + sFkx3BMI928hBwPG8uM+5ILB3CJELUA2yOowdF0jLSznJtXSHPbbUf0QDydPIR7bjBpsOMvIXRJe + 44AYRkhYJKO2OzbokXyXYMLDaXsHJwnWwsY9RmQh1LtqbxJ1O5d+t1HQdh9LJy94ON/OCz9ss2Pz + gBw7kY6vLbPiw6ZY4yupBYbzu/so5POmT8lS1e7RB63SA9lvV9BH3iTS2cjppgE2dE6dI33ZnUso + x+JQm3hCRmzY9SX2FksEiPUKezjH6LEbeu5AMiZM9I/Plucf8+G8df4p/wC8AyjUxEOmWRYYNbIk + b+yrPuyDWDIDqxHSE9j+TbxKJrOmbd7Dsu1gPvAJaSK2XCHeH8uy9w326+XvBYwy9I7kLLuzTZWz + 3h1dp9v/AN//AJBnB8jjf82/DJ9jeB6Yn3ssOPTe86kSdL+Za7Z/ZFgkhXT3I3qD7vWTPLr7v4vx + KnpYtid2H0t/eTRpB6LsXRLNHp1DqmXk1csMgyEmW+pD1dTqA6kSTqPycntnX3yPcOT4bb1Hz22Z + l+5YP/Uvgxj1Me59bYdkZ97vyMi6ez+IFp4gI/uV9cM4OCLjDHUywmFstgG9j2S1OrNMjnUi95dQ + CY7n8xQs2FixCuy2werxwDEwi7dXdtv3ET/jvG287bbaSJntkX8g8eyXIDYeDdnVuY/lecOOT7L9 + sks4OHSmMI+kM7g1iEb5Pgz13Hu6O4DZi70QU7+R/mT8X68fy9n57zvx9lhKNe2Q/t3mkB6ZPFuy + I3H6hEiizZiZEs4eWD0wLTwBjtHVpZd4jvq6NnLJfka2PDO/8U51Dvvzeu36nwPxYsss4yyyeFMu + ru7f+LLbZJ5IekfRsWRvIyFwBksY+otFu3c+cF3LsE4yQg3q6scfV29ySDK/HP8ARCPAQwhm3/II + 7liXg+ZY7It4GGQfIc9uhlg2cawz2+4vd3P7dIXg7Fg3Z1GWFe8KHthiznT2QZ5sYbAc+Z/joGts + jvV26YLMs3jP8fZePJtRZ1PXBeOcf2Hf+3sIZM/2ce2XuQm3nIO+p6Y34t5blsIIJ2Q/vAO2WGL0 + XUZuT1Ztmln7nphb/Y8/x3eFkbc506QmPdl24Jjv47y8MTPczM6i2Pb1wNmcHuRQHuUj2W32QdiF + 0Ry7b1LSJNMkPL+rS9kPu7IQdkuy07cBJMOv9mDZszjMveSfbYeM43YlvePZ6sn1EmcF4I4W9Xl7 + 3DYctDGe8dQ04CyyfOoHYQUj9tlfqNV219Wp7G2F3SY4WdZB1G8J3/tn7dcESId8efD34tnJ5wtm + z/JXKO70Hw3e4Ym5R3EeWQcscM5HrZ50ySTu9T+4B3ySveCJdHIf38d/wUOHf7B+4RnHA8HGRZHD + HLZLZvOLb5De21o2I4w9Wb1G5ZpL6bLIj34MkysMvxPfZ6L1OLMgHkuRvpHvC5AuukkLPxzl4znu + y958x302Y2R3wcNuzw8rBLZvLwcSvtdfUuC9ILd7g7gmLOrNjrqzHlmyxGqz9tNvqXuNDYdbO7OH + SUWercbeMs/ybdb64Jzk2S+4+B0yRZ3wxLkHD+JPDpd71Ekcs+o4HCZDN7HxOrBndpdDuUNntg9s + l7llp3fYSpdmzjbbqz5bbPtEtvA51GYhpwkcH5w+7x98LynCrO71aE931NkXvIeBAyOuU/Isifgl + j7i8Sso7sVf0QE64CMMsPsdS3Uln+DZ83Z2M6cHD78PvnyStPLK0lh+p3nI5GkIQWRBy8vwbHohT + diX0yzZkWoyGzJmqe9L+3+GT1xlkyCdfIN4AO9xxnLfdk/sv7v7Ru93SXr/MdwQfVnKdQ3k/m1xb + h2XhkcWDCk/l16Qu9xk+x5xkj6X/AEsGE07XdpwI+fE4Y4+74kabAGl1LR8t+L1I7z6IMt+JznwP + bpAj4+WjbHBiz846k/LM7bS7wniQ9eKJDrlhYWE5YWJ0YycJYW2/DJI42J4eN4+rgmMNs3TA/cHq + f3EYcef5hGqRxnwX18vL7ut4S7eA66tLYupPhvG3vG2PHtlttrbvweG0nvjBn1DS3IbOMEuu2Zbx + tnwyyyyyyyDqGpeWJrN4dn/EO22yFl71wZ1iN4M3nG8ttuv26+KbfXG2/vD5MzLJ7ZaX1Ws8bx29 + mzeiIdW33zkNhvWX6WFllkTPq6N29t523ksLLOWwHYT5IXHU7Q8+vkMn5PDPB3ecNvwOoY5bx93b + pBgHkM4eQ2Bm2dyBh7MJ4fYtnIDjBMYJf+JhPuzdZ7WWNjZYcqsJDbz9/EecZlknUcYR0FjJzWE9 + R3xtt3L8mfhnHpCCzu+xZ1xEfgGTfeBVkb6s7jjDv4/UcDkM8b8z3u2A/AeQ43kgwijfbHQYhP8A + LM51+OfBss4euO2MQPqDGNQwyXD5F/IfbJ3ka4DTZj3brJ+Pd0/JkOjgPwE8htl7P2OGOB3q8s+5 + 
nqYqkJR9y7ff3wQDPTnz75OGPYbaa4MHYdWx7ifOGOBzhGImT3wtGOLeM5ecnCTHPgm2PA/XG8Nn + hZI+3ZPGcMPOdxp1w39fif8A7jHk8F0/M4T4HCGWOwaZDGzSDk8/yLJN4f3l+X6W22zyWc7ffxbD + 4r8D6fA+/P2H6ZD3taPUO+TK+U4enzOW+4GOC9WcDb8zq2JNvC94bO+M/wBDgfjn+J8wvZZg2vu7 + b59n5AcZPzj6/wAd+G2/B+HTb8Bt5f8A4ucZZGWFlkxPGfL+2C3hF377gjbUn5P+Oxxtvw3rnfhv + +m/ELZ+WwcnP1McLBs8pZLIUH39gO5D3f//EACYQAQACAgICAgIDAQEBAAAAAAEAESExQVFhcYGR + ofCxwdHh8RD/2gAIAQEAAT8QKsGsfacYhFmdBKYvUxIkrjkRnhg71CQ1MQSo3Ef/AIm4ZmWTBMsK + SWJeblEeUy3NsGozDQsLzIk8LlZcQ67A2wMTBmUuXABggvbS4ttiZX2RPaHVWGZA17iG7lGGozgH + MeFTlObuUsLIMBFxCqrMFvPEuuy4YZnRLvJwcXB4x0RqJCfkNx2qA6iR5I6uVA1DLi7UI0c8RUkC + wDIT2wywFkYRIEayuJrBHbjyTWEskxGraV4lGJd5NsUq144inODzMn/4ZTEI0hBjonIxQcQqnW/+ + Vbkg1qXOpRolMKYglFxdY2+oBhtqEuZYitTLZloEFSrDU1zOTLZMViFtEPGOnjj1MhWUKQ2uIRJs + mUwwmBdyxnNAHMs0kAFksgKsRWj7CI1kxQalflj+gEQxAiqmXZHYqHFSopNQt9A4l1gpyRbh/hK0 + C14g3DOasi1FvUsVV5YUDViCLeBAur+5hNR4diNBQGcTmofSJgDW5WakfIy9zkN+YbaJdCMAWAhj + x6hxQ2yyxREjf/xEJAqNREqxgAgDFDDWZdjFzmhu5vVHNsqrEWRNkvLMXE9RBpuD8E4CF3r/AOCs + 8xxa8CSsPEsyxKBMZiYt7bNWuFsRCRVdmopVLokwp5lRa3UcoagtXF0TiLbKin1CFNXEpwDQ1uZG + 2w2R+2vk4iFsvuKoEXSyAgM8sW01EKWQIUEhgFvPcvpyhqAg6EaBVsqZN33Psl3IEDjuVOCGwR1J + wvMwxGrXiGNQ3LSqgNEsw2ssRjlQvSZuYVqJeTjLqblDUo0YpwQymVfMtWkFsTFBiaRleb/+Gu5G + WqKHcQNygzASUWJkajSKhOGVEhiK4umCJ4XDCmptUdMBuYDC4AWN3GmiXCkKCWNaamdNSqpLACPE + B0QbamDSZxNLuv8A5syQBzC2rjQF21EsVV21mXp3wRNo4JZxgpThmeyCVUYl7JuAgRWXuaNDvyRA + xjA+YYamhrbUANNju2DAMZDr/wCQRoIbcuvqWIDcxZIBYQxaYrSI+puyyFlK0hUM3DHMBYJWNzlQ + 9cDtgAM3GGKimYhrk9MYEX2OPcUKi1URd3GogAQU4mAmgsQXcoIY4YKJUHQcPFfzABqHCoaP/gQa + vBELinB8QCTqYsRSJzYZomAl7vULwFdRQw4gC2GUBMrpiaDj8yyMHLzCwrBnE5CLpa8zOUWJa3bC + bJbJgqYk3EGu4xTqZJ0VAC1JfWPQlIBiCgOVbxEaR3yRdQIpb1MMGfglxqVC6liiAXMgSlAQltzO + 8RRIFczEkpeYoIVTeF9RXkYDRBDHMVOaBmOXVttmjio5CBocPljqhGqNKsUMh5qpRKAU2vVfo3CA + /H26jTCSnZiIUoDawPmCAW6pYY194lYoWs9LEN5dARI7qOo0wsttc1rDRnmUCt58QqhRSuBeD/kE + IX0Ijwb8ahcAVhbxiUO33r/2UR7lwxCbRKccGoo5ItQoIAUECb7KFHuZRzz4iwIGzIEyBxAYi2Te + RjE08IlRY6iEvGImAMEi9NkTFwIVKuJbqMLpGDXXqbF+5zKLZS4oxBhQgqzLzD70MOn1HsgAFEyL + jExJnLlZUu6IwzEtIWN1GHJqWsMzYQyGKYCW2j34hKBdwFqbguURew2/iAzvljwh1GVaUs25dagJ + WsRCjsy7L1cqu1tL7aun67ojgTUXA4F4eEcjAs7chEylzCFPUebRKJMOj2/E66kWXzbGy1yABv8A + yoaUQZsfI8TIFMt2fxxxGY4BZduaO5SVo2AGWWWOszEerJyrPD6maZ4bM3Wos9hSjlff5+YILXYh + /I7+b3BtabLt8r9I+IwkVZ1Y5/EKGGJQ1z8TQ8zVwcag3alyIFlvcDVLKuXUpA4IKuYtDUSq4MDF + lcVqu4FXtg8IQbYCDbC6FXcpLA1jMI1bZjrEsULxGlS3kQojQ2yjvSUgaBOYwsRzMD8y03uWLFy3 + AkuWWXlmUHCERbcMFyR6RzYW04iXix1/UqeZ8uF7QgnSFIZSXFu6gtCUWZXbEeo5u7g5VGTBMgK8 + wWOSi/iMEJk1YDCfZK6Q+Coy25SL2ZCvHx+IOaLJwjgGs/u5YJkKg3BGilxfG2HxbL81Vf1GxGDu + LWP44lTjhpbHB0NxIC6ORxdhwTkGKuDXBHBqAacla9VKqgQSwc/+whYRpPlaYcApTkPX18TCRAj1 + 31thVlLPncGg03opbnjjMBWp2S+R/TUsDyidXtZfW8eYp43DwDXoSnkt+IBIom6zioFgW5M65mC7 + PSHAWemOUrMvAF9yszLUAM7lRKb/APmVjxlELPMLglUDRDWwiw0QLXcH1JY2DxcISi9xKyBqZVxo + ncu03EgIsWiMBWvMAp7O4pHEIVMTDKAYPFcdsal1rLnk7mDiXGu73LlVy5OajkWMEyz/APAVyzH4 + Q4JKCWCoaXFojAqG2SWXB2sxG+XqAgGWbUYMtqXDQ6jwmY278XfcpUUUxz767h6IN2YTHn2xRSLK + WTOzYcalgwEaMPfuAlrxA4xfKU1LiSpGntffEo745od3j04jNiwBX7eau/ioMUdsK5xLCY2W1P8A + v3KuRyrwPT/UYXqS6ROEh42YGPWys7ZJXNabLWsveNSxg4qFq0406Z+NfiXpFAE3fo+2I+RTLRBX + Jf4CAhjhmNHRe3Veaj7hDNgOny6hAbAg0rncZpVTxzKWlT6sGVo8jTEv5o9ZjxCDK2o4ajSZ02S9 + jwT5BEQME0m/mA7Ki8IVAwyxeaMXHcqzEOxqpRDdDK4VpJocSKcByx2glZj2HGyUlTLkWKW4iq1E + aVNdnUUx17MCRSKWFaeYG6q4o3KQKSxQZnlmIbEHIQVqA5is4igJRLMDGJzLmVKcT3DUKxC/BTGo + hiBTExnSWqHMoEz/AGEXLZqN4ba8U9RtBFXdA9LAPucnfsSDZZFQ+XmeREQQ54g4s8eDiOMgqVY7 + olwygoLVXvP4iIjadzwcG8sIvwXgpM/EuwkMC3ZbsL+YzuUMyKVeC/HqV6kopolc1n4idUKMHC6T + 
ZGZj2qdrxUCckCw+oyCE5dnuj9cwpWJsJXvL+H5ljZHDAatURlNVXkckfZTJOIHULbBBlYJAALS4 + /bVw+ZVXT2SwxKahfVYldKeyACKTP1iWp7U5uJmUdECzW4iI6aSOm1KQg27v/wCABWGJoArZWIuG + so4gSUithCCy08TQmEilTdag5QVhbc41qCeDHc2Kz3EOB7StRSKdlZZdI5AqPHhnASwU/wDms11A + WIATmsWwcREBiZ6qCtd8CEKyiWN1uAVMxLEmNhi5U3Hm3csQIqSrggrKnkLwdjKc1gHL2Qw+9lT6 + uU5AGmHPL4jjXYMmqgPzDjDCW89/EGO8m1LX2vJxMrhI0IFp/LG5y4KvIDhyQU7Vm20tPPUC8IB3 + UprxjXmFHwqYRXrYEp2NxoV/DHTVldx0GqChIOACxbJQpw6uWp/LYrxYs/qNKy5Lg4maMtAZ9t2y + nVoSgGRzeq1AfRyMrZaZriPYaiLrcEG93EPavSHEtmTb4juiaWVC5qEZyWA0jQxHvCUjlfcKkE40 + isN0HKReF5lgOhZ3lbuEi7Vl4n3uZulJzE6VO2YSVxEykUqq4l5MMaWCpXqg6uoAo56lWwZhholN + aIAsA47mBqFhioIFPJZ0qXwMMq4raJMA9x7qOww9Q5CQsjmBVvxLECCs2eJnTua9USqmsaD0Rguc + 6hpcLqIozFViOw2sSsp7GYJLORf8GFSKZuV9WfMMhEvRv3cdtkhMVRgMDDoMO984a+JbW5ORWHOx + Vwn3a9CPfD6l0qbxy3CfnMoxaCcs/fuHyO54c/dwR2wIN5g+VEythW0aT1dp4YQB9jWYs9YvHoCW + e5dmzun26mTc2R+hz7iFgYDNuQdxYvOML8D/AJLJWlGXs0LqZttaKeYkWNOWGU2M+GWle8XAwsCi + IDIbPMUnXcwswIQq4gGXUM3wbZNIQz0PNupWlu2YyFHPqGmsTMC+yXlKGUexRlICroIFXizKZZOY + pKclcSlamISPY5yE1AyhIkmwmoiy14gCC5MQUam4JKSsswV3zqLgQOSHX/I/+XaBFCt1KyCo2Blj + lXmFewiqg+oyV3MmKhzbuDmphCuY66ZlVFQxjMhBVBHM4Tjg71PMFCr6lLGi2kX/ALES6yKadyj6 + jywf9gZAKjsuOceSJSHL6OgPyMdrfNiwWqxm3zUVsK9sPb1HFihlzY5+4WFr3PGoggqc3zi4LNhE + FFu/yR4R5KrAfBj+Y4ZsCEraK3o6KxuVgSbHdG8+X5hQKOwfIOPmN8KcGTGrgQ3MgLHI7fzFV6ak + AfhuAqpty+s4gYKVu5TDlVruW06wiaUmcaTAoDGTJCFWfcXgpe5HfTtxOTOICQJwGIrsw6Q2ihmF + lEZzS3RCPmKHUPOBq53QJmy6xKy47mgdcRBKLJMItsWplRkis9xC5Iih4lidzMHGwdykBzMgg1j0 + 8S9R3LIZtlOSrMFQBH5iRqPgfMocLlgGhszCGtxpoVUWptAbNwAWre5zTUQCwRIBjnEE5ZlxQ8xC + RkKytUGWC3C4cUtOCaKiVWX+QHuYL3/MJUBaeQXoauN40gquarEarq8StF+5n5vLjzSpyul4l9+m + 8vONlQlwGyU7Fe2fcuPVN4ZL9nXqohUDFB++ZnCXs2RK2wHbmZSgr6+IAE1LKotjHd16mGRg6XWQ + MU+ZSkwAFMi/Ko/gu4fJo6WGi89blZKGHAGP4hdlIgvs8xOJdVpLkMDCn+H/ACBF2sAf6Pq47jDO + 5VgKc9QZWGztl2DsRitAdkKg5zKEMuUs3gyoNgsg7gzj3CuZR94xMXIITC1L22ciLUqcx1bW5pHX + VwSGzAREIb2ZEmb4BshADQzc8RUFsRewQJwqEt4eEZJwSrCxuFg81Bs3mAnPiWb5ZysYrSULlPPc + RiK0XLCBkYEy0EofUsKwLNogL4gCrVxF4GLSKUQsELCpg+mWot3POo1VqFSrKWICDUzVygI6LQEr + lGGUqPi5aBSLRxfj/kOsTEu8q+4Wr3cLSrdQZ0tq5cJzDFWRIiuhZHo9QUFCxDikVeZWYFicNUv7 + hkVe637jtKN0aipKNVnXMEpNZiqAKZKpYQwQxGAXYZjiWlOzza2u7eJwdbCp0Y1UCqAVwf8AKogI + YqhRStS4xmtovxWPqBKFfog7SuxfyMHq5hLwgBYp1M/RfMcxol6IGly9bwcRjaZrmZQpcIKfAqYW + 2E/AHiU02+04ijUE3cxGASjDxKspCt/ZISG03COCm2MOFKbicst+okkDyP8A44B8LIdCA4llPTML + QBYwqlB2SxLmoV5Tg9RDO3WojrolhYRri3eplMx4jnnUPMEoZl9Li4h1VqIAGFIYYhgoDwTAnELo + uY7DreCVOjEZMNcwBnRKVAExLkN0TYSa3e6YQFW5FtUYdbKK03mX8cqOCtPhmGmnDeGMttW06CUf + UtG4WiJ6aK+4JYqIh5XrtiPu6d+ZUraWCFGQ5dy1ahvE0oXENBtQ5ziUhol6A/8Ae4xLXgfhvk6d + 7h8U4jXwY4dkVGAKS2qaqAytZWG+HTzLA6GmT8tD7/EIpOFkb8pBpYsNEeSi/wCY/oDmwp8wYFIy + ShRYcMRF0tGDCGJa6PK5dTDlZCy2gqIKu+IhlmOECPKW6zEOw3czUaGBX4hPK/EQNDCiXE5mCo6t + jiBmy3hn2noLsW7mVGe4pIXqJ+UQXhuU0WrIQzLuWjzgkN0Yjdjyzm1AcyqlA1tEOBELEeoXKLuK + hTbiIUIhbCJeWzzDFNyhT7ipb1c4l8uo0/3CILK1GseTmUI73A00S6Li7rUYi0LDPvrj7jJEX2Xl + XH1iW2IEyDroOWK4112Fazbr+fmYDD4Jk5+6g9y1XapiPEBQXejrjzLaV2N1u1d0d3Bb8ooVbqt6 + r4iI7S0yo2NjkoiRbKMUV7lN66ryfpUct8i4m8ivplpZWD8kDo6wFYqs8DVmPb07UmN55vnconF9 + xYDA+2vqWRJLQesvXEINzp8snwwICBlN/N/7AiGyKX8aYrNZIrL0d/ZLFCtI2fc8COSImmMkMcJj + 6B23LcxDxEXKEgjmIRrzQsGCZsnBn9po7OYATwxrayRrF3nEILu/MHfenULwI7MQNd0sIadloILh + RFI8QkBDzLYODNw2JVjiMBu9EoquuIMMWOYdq7xMgIKMtYOYylawdsDZjh1GQvcYZ5hkumGEykBK + LqZiRddHuIGx2MBljnIVEWGosyBLK18wGAzBSjHMRQosJhTF8wq55Qsh94PbKuoF8C9uz+Domj6p + dVocWE9V3DTb5VZwz6qPaICUC4S6uAAEJw8OVELWWQwZtujr1AuboFDxblV+IUEgJ0GI9ULWEHK4 + xDUz1XHEMBxVsaLhAHNYXB8/iELRxeG+5QI5xX9oiAJIG0upe8a8UAAFZL/Mr96pyExxdePjMeph + 
Y64R+8kuAqVWE7riGXZsrJ6YEZXLl8pKwtJ4v/pCofSI+QoPpvx3KMVOWi/Syi3IiovE6dyuhN3C + QLTBIPcFC4qtxFtrdwKqRl6YLFLQNLpMrfEqy0ImAqhBS7zFGlQoItqNIy6rZEPBgoYsBDpMMs9a + lh13G8BcsrxmYB9iKMYI20tngh3wEuzJ4ijrcTggJop5IRhFe4N/qjGKsZZtKjZTuJiwjNLSVMt3 + Dpe3JHe6a4qB1l7mXWZaVuW1sgKHsVlVGW1Ips4Js2/7EBcJoDvzHVK7NdJgeXJBNgFXFvfLBRvU + DlTHqHKu+qbzDngtdrKwNffUMNRUxjxQHiYJRcHZNDc6GCMSJZADnETYoTdcxbu11a81f/LlIOZ1 + ZccQTli34znuEtZwCl17jU40QukJrVQw6Gc1GzTUHtKj67NFXEysKt5h1wyKyGGvIo9bvyZxDALN + Vf8Ac6qzKJKVsD6ie9Whgels+H8wUcGxCkYWjSWRrAM+qI2YM3EJcxcxcwKpwpgJUX1Li3LqPa64 + 8yyFTK9Re82am3b2EMU5cMQArpcQrUYhooPMu+JPcUfHplSpaWglfq3BhgPMq1pHkO4zB7mUPcwk + 73TGA4U0kQvdTKhuBqi7oit7zGsVAspULa21AQl50wmVFMt1EGx+CC4JbD9RC0MGFYNQFzi8zdSh + kJm0FFGLX0f7E37B5P8AFGPHHMKqp5OT/Rijoe2I+nOMaPRk+/UHWFpkcbf4glx8cAutzJMkRZRt + d0n7xFxFYKIo8OeJShQJoeS/5mYyAuoCZGW66Nr9ksIhwXXNA5lC5qrb/Fb3Crrd35BrHuK+B5sj + 61+ZtnCoxr/p6j/IqvJRn+vuYc9IAwtdUh/yAKVBYag2UqxEOkLY7iEHduHij/XHuBLUDpb2Kwr1 + 3GXAFTxjXs/UgYStvGePsb1DQmDItfF5PT+JaomkWAOW+IjSWtRQoU//AAJFfzzCig0OkZmiDdOZ + 5pU9HO4zquCpmek1KdOR7hhrdkDJjklOZcxgS3MSJBcLxHy745lpd9I6mW2e6YLrcHOyUmAZ7QmF + GGMJd8CAQNy02RwbEYGZQupV6LEorNMxK5qObjnVHUXiZElrLkrFzRubzFpLbeZZbV//ABF1ZWMf + CQHuLOdRWVeWIoC9RGxg8uoTa5TtbAb44wdH+wWZlYcYNvoPzEH6FqWWK6P8rtZeXoEOmlfzDDBo + 5d3m/bASQbaINtz9M7xQIN0gZXmGQvbGxhgQ93auYa7cz4D44OOIRVeVQpqxmPEgaeD3uLqyNqq8 + XoQACquktI6kYgoYaFH+RBODUOtq8eDuKsZWcqynuWBD5YZsTr5IWe4ti51gcxlktSO5sE1vkYYg + dVdG+i/2D7hbBMCD3of7hHly9L94dL40/aMsDeA9XiBlq3rH5f7gqVRCj0B1+/IYRIMACPHUw6rj + qZDV9y4ziGYEXaUdrYyVKn5RhGTki7K3GpncqU44iiynBBq1xvDTL5T1xEs0ZlJCPiAWxAxddLHt + keWLqCxqPOrEYgmzqY435pGU6MO+5RGDVnMyl8x4rLiFiHzDcMqBucbxFOduIlTN6ZmPEyR5ihlY + HLLVSXquKQDMh/ESXdMS8oIcEsGChmKl2/EQUsFJSt9nxC+jvedEIlcVwDgHfzd9ROoHYBVJo+l+ + o1D7ymyFiXi7bgI+oed6Qp83+cJCKlGhP4+hiXw6rZCrCTXHyRce8RU5yKMe6S8EW5iWS4qNlWWY + ZjQ0ii0BtUxn5/uAMlsdFZKeYT4bt43I8JA1zTwzPgbpZiJoQ0cPTnzL8LJZeTyTMVjGJwc5W8+e + e7jpiLYRatLazyFHi4U72urlEsfRdI5O0zX+1d3hA5tWX0W4/jNV1vjiWa5vI05EVXwDPOcVuZNK + PL3f3LLacAjJ1/xjAUeUT0FPtj3EAX0sA8djuOLsuIXAja5h83owzQGHQCuGYCqQ7cX9LuVTfwI+ + ELgisjeKIvq9w1ssQtKoCWND3EHMUzsjZyhohGoIQSt49M3eyDOgyzgHAdMuq0QZ4QQVQR1EqH0u + US0krgPjKYWICrHqXSGCLNMqR0sAQKCWUYit7mhLtHFsSgJAMlAdzEXuJXUM7dxrrSYDU1BqL3+Y + E2V1LQOLmG2ZnjKhMF9sQlCkThfL9a9Xuo8cwq4A9PHxGgsSiXccjBfhnlub4JFHSrKvZTqGUGGv + rFuDPUq1Z3mv7d/EAN1olysUp5wK5wxnbMygzZ/JZctgfJUAZHCiVZ6yuQtqGzf0RTaVeKGnOoCr + 1Z02LXBKCtAnhOH5P6ljdFuV8dO9Q5QththrTvJx4lPeGVdUMZqjDhXzyFKcJqkWiHCWaZ1ExUFm + QNNFNZxD0Kg2KZNvzRh9kSI88Mq8315+47Oo4JfzEi0brOfDk8wUZXStOnf48VEYRs4rLx5iEOJs + DsHouvOGyERDqVHOi6l8KLLiiLsuV9TDAxiBqaGLECwvi5VNyZQiGWHUuMBctwy4jTkeGWe7hJdJ + qKgVe2LMIPBHtiRDkp4hQiWYG/MCsA8DDQ/KC0w/JGYQGR7lBuZySiI4JKCTNw8FnykbtHkOyWIh + 4JlpuO9IKi6aErUrBRDCUXmBWcslW1AlqHqYJuDM2gwDcWyLGHQ64iKkpIQqbBAzKpbTz3EF1BUb + /MvJXcpfJ7Oj5eoThFNgDw8sGBo3mxoH0nCrutB9EaKUbo0MBVvOl9BVxrB4Ery7V1m+oDIHht8J + wfvNQBUwQcP9YUi0MO8sL/NevlMsS16K2n8D5x3GArw2ynoRy9Dm4SgK/wCg3SV5YINQVUgbAwNV + VtZY0mkPmKc8N3xDWI8cDbvD8wHQGhS3I0efJDbLKdSzQVWh395lam9VKV5RHZnI7s7tgXbYJf4v + X9Ylzjhif839QXJH5jxny40+YPX50t1lU4cPmu4ca2mUg7/lTd8dxE1aBp57HTuudxU2Tlmv9/vw + ZYAbKDNPNhp+FwXYoC6z8pshNTlZIZXF5WxPKQgB8R+v7NRVH0NxmtPOFz49F2kqtMZUNRm7QieX + gtRJTxm2AgxE5l/m6DMcp2IYrG/UVtQjLeTiL2jFao2TUT4Y6ICpUQN2MsIc9bKg14z4hQeFUwrV + xrNixcM0dFy2pDwxZiHhzNwBke4AIcYxLi6xLIn1ANMSYN9xAU4zEWyYjDHUw4volXH1RRpJZ2Er + 0DwQ1Ub4i9RWnuNHdysRcxXUVh8JfJUaDngPa0fniEKqoKAYfS8FdPUKtEBEs57TgwNXpjcdicKj + oVAZznhxoKEMbKKbzvV2xk8wbF95tVs62oRKtCByrjegMqgH0ootUhTD/IvuuIgN4dgLd/WHtJhu + FxwlB9p9QKOlAXbz/H5gQhoXbuj/AJUUA23B0fxGcwoltmznwzGttZHSwT74l2QSt5oVCiW8II+B + 
X/jDUUACU1j+iMXkLBr6eNfCHJDKkaYp2wNXjJ4vWIxge6rTKM9lcCXQxzq9V35kWZkBoCGWAJDd + Rapy8BfKV4xiaC3lRzXqYQiA2PYfzXvuBbmeUw8o+JVY1ro7fsTwnTOUVHBf7KxsIICrXvIYMUaV + kPyQXeHC5iCrWjZhItUVh1Gtm+fLDFUaS9xXSF2KY2LTasX3M1LXeUMWlk5YiXvkhlrDyblJFf8A + ycJmbmWQbmiOYJFTg7McyDgwtrNZKhmxTFbg7eLlxSOEjCXoXuKt5FnEscPqV3WM1UOvMScjUcaz + cUIFdTSDeotWXqJxtTqHLFKEQe43DQ6JQg8xFq+SaLYdJZ7imQtq5gxuG2pIUbfKVhWvzLyJlgFx + 9Uf0X9sxLNAsoLBZ3RUEHuzfQmafWPuVBlidg0ZqEDDiHNRo5oy3zsbRKF1rZvw/g6peC3rQo8LG + zpZ/btnTihnBtf5fvm4bSFuHF/4YbBNNsfkIs0oYYhgOr/G5e6wNjmria6Kk9Ov5qAu6Q+MX/ZGF + 8Ma5oS7r93LB2aV4eQMFgcYDBnjoa1w/k6a7VsXY/wBfwyhFSl7mm/FXcAYUvYYvemxfH3KNxp1d + fL2Dnkdwhti66LIPC7UwraZAYX3MUCeDl9f8iR2MU6e5ZqysskAIbHZNDyW/CnMqMI0c3zPCKAHu + Y0DAP8srSLX+EwvZeeII7AVRCqzNZ4gtS9Qsrb8YlWJbFS+Ac7FxmyirY/iCEufWp2E2lDY7IQWe + sYpULUGUdkqpnObhWW7nHc04rBKzIeYwuu0SALb0wgZLGyG37AzAapguEh85ib1OSD9JWFSuL6gx + NDZftcJcLvJg8wKFU4VWJjsu0d4CQHlVd4hqqTxL6M+IFGonbDqXszzmKN2zuJTGuJVLhZSD5TLQ + T1GApMzSfKWIKqXS9/8AT/UKzIcv8Erzy6/3iYPRKPqeBaJqdjlLbF0X3inVw4CqeTyOQwvNAdSo + hgA9H97+WUtALBcFykQ8DowfvuWQ2yBzc71QH3mXJuwXt1f5fxMiqS67oP8AL9M/rFyhf2+yKJhc + +wS/nX1EUsgPjJX4ItlgKHFKX9BHPi5ZzVnTBpFLAsxw14iPACNuvvjHUoTCgVT+zr3OFwLoo7P3 + 1CsvFRUBgh+KifkB7eufdw3ADBoh4Xn25/p8Mzsgn+/EMqjwkQ0iKb9ThvcVQSUuOVlkAqxwQ5yF + b2G4lgBSiMCsy5D4hRqmimDhPiYQtF0EEPLpDX3Mp+6tQ+cyuU7sC06hPoONx3Ca4O2yAdh0x3ag + oHJDio6ixAil8w4i6qCcBjHmWC4QjoccMxPJrgiQodDUrLR+TFzN/ZhOIULLbzLGgtnkAF6Zg2Vp + DmNME66g/NC1HSgOcxuFp3cJ2R1IvCcbly6sjhyFoJQ5L0kFQOPmUw3DUx825Sc2keYuxCCeCJC9 + HUoyALe17nSRp97fzj4ilsFu2foglstz5f7MR1M+P+5jeXCdLlB3VHpZYeqw6tiPw4zfTL3Ybbn+ + 400fXNf+R1M25bzm2v4+oEGpYuKzf8B8wGzw6gBt/JHXa3VeDP8AKfUVs2t85iPsBkgw3YR4S4EC + qD4E/wCyvcBa+buBW70dpqq+40qxqaWc/m2IEHTC6cJfu/iYzRngc/j3Dt0wbhICLYYR/uYIihLH + 331nsenxAakO03bEreeF/mKNN11ccEh8pAqRXNgHjzH0o6RsiCzHDEcSkbtAKUGYPEq5C2rhg689 + R8QcFrJTXBlogIANZEqgpqi/7xLrlLWFJOssq338phoFqN+HvxESKsgphOwlN9RNK8gGA8GKgynH + Ey1kceIaCcRHmeEIHnNzKVl+uY5LLWCP36iAxl3GDdQvEZCpVeKmcEOQyQEZGoIhYKXuCcUcTFhV + T42cIp4oRWu1sOOlLi1mrrUJVMMxyglsPEA8w0OYZBLJKlaW8xOxGs1Mmq3OSGLAddwlgoLvtlRV + YWY+Jd8o2uEoJYorO8ka0NBX8zhFZOOQx3arXKvMr4tDzRy44Fr3fUEREGWzu4OjkG+4o6Fa9nXz + /cIXW6uKyr/ghNJtb9Gf7hgHnPR34V/r9SibVGesf9iRyNo0gpg7/SChFsrjCvxErG5AsBo+vPiI + COIGr49QH+oK6lBBX01AUo1/UceLhEVRaQoepVMNgbbP3xKjaFIwMxRAOw4iA8TqtREb80P7gKra + DJPMvypeCyCEvpEslax89RrvBWB0Qy6LXSUqG538QACOaV+SFG4rGCK0m7UxuTttb9qgFQ4XNPcs + UhpG+h6h4Htb6vkitQN6DFvwLBaQzHKb6RiAs98RlAZYDgczOFKMxC5Kf5grFBmp4pfhs4OIBZ64 + VPX/AACKS64OQg5vkKAtOqGnBymWLEEnhNglgRTxA1UeII/tyZNHlD1QcEX9TRjcMI4ZL3MQM/cX + Iri1ikbXx1C1SrqFicMZnCti3ruYEtvRLYlV9Dzf71MqtOUrCMuK6IXYMmsmOIjqwCYp1b6W4GgU + ftiBqChfEpU5wwyHDZ+I5w0K/MG10cXAvGKX5lIAZJnwyWHQj/n5lqR2XlyD9RE0qbmRX8x3mYRR + tx6UgSkVTFks0n4RmXEBDp/dwa0GOZeK+dRrI2U5E69jCIIFaH8wW3CuOY6m13RxARFJhlzEPmyM + luvgYq8+eXD+wyxMxa5bmGhXw5pMbCnJ2+IW4rxxPmXlR08H9kYnkfCB8lrJ/ZOBgCr8ExDmLBbd + y8spDVPjiMmh3bhlQB9E9kQpVx8kzLUUywAtyx3UDqZPg3OXfM2ZE0NnvqV+CuuJYmbLhgtdNIgb + q2SMjKzCZfJpl+seOY4jpjxCpV3klekADW4KCnNMGbzhriZy2vUrFC7iqqniKoLWmG4wcXMul3SC + K3G4hfQlMqSgckziHmioOvRw1KNHDIMuUWNLlm1HujP3FQI4wDAEClvVPDGFT1jmlcX1UBtvZuNi + IOoRHvGOsG7vJfiz+4zbar+vREV+YM60RAPWZxOnEwaJyiADtf8AyUxsGLdg38JA4mPybnED2HCL + V+A9Ro0Gt92/gQrVCSq3kdKz+8QqmgoUujFPWL+IHNwVw7gvf/cxRBTk3c1l1EzTEMlqZ3czNW8P + EM2jCOFTMQOCJWZTMsscyDmPu2XZGiuURzCNgbGXPRtrd+psBzDz48xJZTi48QCKwcnjxHoBXGZk + 4J4r6q5bmlYP1iJeIqRKt2SlpvEWa6glm1GKlCnDtGODHk8ISkMRE2nTKSqzA4hk0xCyOHmOklnc + p2YxW5qN4tgEC2Zdgxcf32NxxSoPapt4CCMrTjEuKA6xGWAQI4MoggDuAyrbUYuVyhYLt0EXuHAS + qrLmjUrdi5u4GJcmZYSTZMRicZglBDcMU3BgFguBSH5C1iFROyC5r07Dg4gdEHZq4b+swPy/uYjA + 
[... remainder of the base64-encoded JPEG response body (tiger.jpg, 62,971 bytes per Content-Length) elided ...]
+ headers:
+ Accept-Ranges:
+ - bytes
+ Access-Control-Allow-Origin:
+ - https://huggingface.co
+ Access-Control-Expose-Headers:
+ - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range
+ Connection:
+ - keep-alive
+ Content-Disposition:
+ - inline; filename*=UTF-8''tiger.jpg;
filename="tiger.jpg"; + Content-Length: + - '62971' + Content-Security-Policy: + - default-src 'none'; sandbox + Content-Type: + - image/jpeg + Date: + - Tue, 29 Oct 2024 18:12:01 GMT + ETag: + - '"3875afc946bd1cc6b4305ff74045c12164f95326"' + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 7c47873958412986e58bd33ceab774ec.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - DwBfUgYMEeMyphsPBqOltWTk68QW5XCdT0jXST93Fav_lcCmgTo2DA== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Repo-Commit: + - d6d15eabfd53d7a15dd553513e33262f320b210d + X-Request-Id: + - Root=1-67212571-32bf7f2069f68b2f60afef12;b0bb406e-348e-476c-85ab-36fe49abadce + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - f12d0ed0-2c54-4aad-bd9f-96db4dff7e8d + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. 
It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A
+ robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A
+ document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A
+ special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A
+ powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A
+ robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An
+ application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An
+ application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document
+ Question Answering (also known as Document Visual Question Answering) is the
+ task of answering questions on document images. Document question answering
+ models take a (document, question) pair as input and return an answer in natural
+ language. Models usually rely on multi-modal features, combining text, position
+ of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document
+ Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia
+ dataset containing cleaned articles of all languages. Can be used to train
+ `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India,
+ officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension
+ 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A
+ powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A
+ strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A
+ leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A
+ leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature
+ extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature
+ Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A
+ common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A
+ large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The
+ <mask> barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"A + text generation application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"A + text generation application to converse with the Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"A + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between the distributions of + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images.
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt-based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generating highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application built on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural-sounding speech given text input.
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning.\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions.\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting-edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating a consistent + sequence of images from text.
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matches the reference + and 0 means it does not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fr\xE9chet + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a text prompt or another image).
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering model that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. These models output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in the top K predicted labels\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for the biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find the best captions + to generate an image.
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Tue, 29 Oct 2024 18:12:02 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 1dcf816edaa6e449f6f9de65a17ce6ec.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
--lbZ2r1WUUBL1ryvlgaZ_L-Jh5xW3RH1-2GXOTiaxq3J4oDNX7B4g== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-67212572-576d77b5018f7a2545cc1d6c;f12d0ed0-2c54-4aad-bd9f-96db4dff7e8d + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/dandelin/vilt-b32-finetuned-vqa + response: + body: + string: '[{"score":0.7786104679107666,"answer":"laying down"},{"score":0.6957443356513977,"answer":"sitting"},{"score":0.6489157676696777,"answer":"resting"},{"score":0.5639538168907166,"answer":"laying"},{"score":0.29528698325157166,"answer":"lying + down"}]' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '248' + Content-Type: + - application/json + Date: + - Tue, 29 Oct 2024 18:12:03 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.133' + x-compute-type: + - cache + x-request-id: + - 1_-bir_2TAp93TJ68mWCy + x-sha: + - d0a1f6ab88522427a7ae76ceb6e1e1e7b68a1d08 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_zero_shot_classification.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_zero_shot_classification.yaml new file mode 100644 index 00000000000..8090bead05c --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_zero_shot_classification.yaml @@ -0,0 +1,791 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - 4a7a0d59-8435-4b07-a0c4-9b8b74f1066d + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. 
+ It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from the CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model for audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio signal and the output is one + or more generated audio signals.
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"A monocular + depth estimation benchmark without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstructs the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application for cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting the depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character recognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1.
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank the best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + <mask> barked at
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is an image classification dataset whose images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks.
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at an IoU threshold of \u03B1, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"A + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset for image relighting.\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation.\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of human perception, considering + the ratio of the peak signal intensity to the distortion. + Measured in dB, a high value indicates high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images.
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 14:22:20 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 3734acf137431fb00caf3c73f9eb75fa.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
GMQZ99CN4IcE7tICuhanUhZxxUAxI19ca0CQHGX_-jOTAMhDrP82BQ== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-6723929c-59887f1408bc085852401b36;4a7a0d59-8435-4b07-a0c4-9b8b74f1066d + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: '{"inputs": "A new model offers an explanation for how the Galilean satellites + formed around the solar system''slargest world. Konstantin Batygin did not set + out to solve one of the solar system''s most puzzling mysteries when he went + for a run up a hill in Nice, France.", "parameters": {"candidate_labels": ["scientific + discovery"], "multi_label": false}}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '354' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - 992f3b68-02fa-4af3-add1-5e8e73acac68 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/facebook/bart-large-mnli + response: + body: + string: '{"sequence":"A new model offers an explanation for how the Galilean + satellites formed around the solar system''slargest world. Konstantin Batygin + did not set out to solve one of the solar system''s most puzzling mysteries + when he went for a run up a hill in Nice, France.","labels":["scientific discovery"],"scores":[0.9829296469688416]}' + headers: + Connection: + - keep-alive + Content-Length: + - '335' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 14:22:21 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.178' + x-compute-type: + - cache + x-request-id: + - GzTCgDeN6QrafetgpVs2o + x-sha: + - d7645e127eaf1aefc7862fd59a17a5aa8558b8ce + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_zero_shot_classification_async.yaml b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_zero_shot_classification_async.yaml new file mode 100644 index 00000000000..59aaa6e15e4 --- /dev/null +++ b/tests/integrations/huggingface/cassettes/huggingface_test/test_huggingface_zero_shot_classification_async.yaml @@ -0,0 +1,775 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + X-Amzn-Trace-Id: + - d1943f1d-c463-48d4-b807-02bbb0da0f46 + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: GET + uri: https://huggingface.co/api/tasks + response: + body: + string: "{\"any-to-any\":{\"datasets\":[],\"demo\":{\"inputs\":[],\"outputs\":[]},\"isPlaceholder\":true,\"metrics\":[],\"models\":[],\"spaces\":[],\"summary\":\"\",\"widgetModels\":[],\"id\":\"any-to-any\",\"label\":\"Any-to-Any\",\"libraries\":[\"transformers\"]},\"audio-classification\":{\"datasets\":[{\"description\":\"A + benchmark of 10 different audio tasks.\",\"id\":\"s3prl/superb\"},{\"description\":\"A + dataset of YouTube clips and their sound 
categories.\",\"id\":\"agkphysics/AudioSet\"}],\"demo\":{\"inputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}],\"outputs\":[{\"data\":[{\"label\":\"Up\",\"score\":0.2},{\"label\":\"Down\",\"score\":0.8}],\"type\":\"chart\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"An + easy-to-use model for command recognition.\",\"id\":\"speechbrain/google_speech_command_xvector\"},{\"description\":\"An + emotion recognition model.\",\"id\":\"ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition\"},{\"description\":\"A + language identification model.\",\"id\":\"facebook/mms-lid-126\"}],\"spaces\":[{\"description\":\"An + application that can classify music into different genre.\",\"id\":\"kurianbenoy/audioclassification\"}],\"summary\":\"Audio + classification is the task of assigning a label or class to a given audio. + It can be used for recognizing which command a user is giving or the emotion + of a statement, as well as identifying a speaker.\",\"widgetModels\":[\"MIT/ast-finetuned-audioset-10-10-0.4593\"],\"youtubeId\":\"KWwzcmG98Ds\",\"id\":\"audio-classification\",\"label\":\"Audio + Classification\",\"libraries\":[\"speechbrain\",\"transformers\",\"transformers.js\"]},\"audio-to-audio\":{\"datasets\":[{\"description\":\"512-element + X-vector embeddings of speakers from CMU ARCTIC dataset.\",\"id\":\"Matthijs/cmu-arctic-xvectors\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.wav\",\"type\":\"audio\"}],\"outputs\":[{\"filename\":\"label-0.wav\",\"type\":\"audio\"},{\"filename\":\"label-1.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Signal-to-Noise ratio is the relationship between the target signal level + and the background noise level. It is calculated as the logarithm of the target + signal divided by the background noise, in decibels.\",\"id\":\"snri\"},{\"description\":\"The + Signal-to-Distortion ratio is the relationship between the target signal and + the sum of noise, interference, and artifact errors\",\"id\":\"sdri\"}],\"models\":[{\"description\":\"A + solid model of audio source separation.\",\"id\":\"speechbrain/sepformer-wham\"},{\"description\":\"A + speech enhancement model.\",\"id\":\"ResembleAI/resemble-enhance\"},{\"description\":\"A + model that can change the voice in a speech recording.\",\"id\":\"microsoft/speecht5_vc\"}],\"spaces\":[{\"description\":\"An + application for speech separation.\",\"id\":\"younver/speechbrain-speech-separation\"},{\"description\":\"An + application for audio style transfer.\",\"id\":\"nakas/audio-diffusion_style_transfer\"}],\"summary\":\"Audio-to-Audio + is a family of tasks in which the input is an audio and the output is one + or multiple generated audios. 
Some example tasks are speech enhancement and + source separation.\",\"widgetModels\":[\"speechbrain/sepformer-wham\"],\"youtubeId\":\"iohj7nCCYoM\",\"id\":\"audio-to-audio\",\"label\":\"Audio-to-Audio\",\"libraries\":[\"asteroid\",\"fairseq\",\"speechbrain\"]},\"automatic-speech-recognition\":{\"datasets\":[{\"description\":\"31,175 + hours of multilingual audio-text dataset in 108 languages.\",\"id\":\"mozilla-foundation/common_voice_17_0\"},{\"description\":\"A + dataset with 44.6k hours of English speaker data and 6k hours of other language + speakers.\",\"id\":\"parler-tts/mls_eng\"},{\"description\":\"A multi-lingual + audio dataset with 370K hours of audio.\",\"id\":\"espnet/yodas\"}],\"demo\":{\"inputs\":[{\"filename\":\"input.flac\",\"type\":\"audio\"}],\"outputs\":[{\"label\":\"Transcript\",\"content\":\"Going + along slushy country roads and speaking to damp audiences in...\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"wer\"},{\"description\":\"\",\"id\":\"cer\"}],\"models\":[{\"description\":\"A + powerful ASR model by OpenAI.\",\"id\":\"openai/whisper-large-v3\"},{\"description\":\"A + good generic speech model by MetaAI for fine-tuning.\",\"id\":\"facebook/w2v-bert-2.0\"},{\"description\":\"An + end-to-end model that performs ASR and Speech Translation by MetaAI.\",\"id\":\"facebook/seamless-m4t-v2-large\"},{\"description\":\"Powerful + speaker diarization model.\",\"id\":\"pyannote/speaker-diarization-3.1\"}],\"spaces\":[{\"description\":\"A + powerful general-purpose speech recognition application.\",\"id\":\"hf-audio/whisper-large-v3\"},{\"description\":\"Fastest + speech recognition application.\",\"id\":\"sanchit-gandhi/whisper-jax\"},{\"description\":\"A + high quality speech and text translation model by Meta.\",\"id\":\"facebook/seamless_m4t\"}],\"summary\":\"Automatic + Speech Recognition (ASR), also known as Speech to Text (STT), is the task + of transcribing a given audio to text. 
It has many applications, such as voice + user interfaces.\",\"widgetModels\":[\"openai/whisper-large-v3\"],\"youtubeId\":\"TksaY_FDgnk\",\"id\":\"automatic-speech-recognition\",\"label\":\"Automatic + Speech Recognition\",\"libraries\":[\"espnet\",\"nemo\",\"speechbrain\",\"transformers\",\"transformers.js\"]},\"depth-estimation\":{\"datasets\":[{\"description\":\"NYU + Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.\",\"id\":\"sayakpaul/nyu_depth_v2\"},{\"description\":\"Monocular + depth estimation benchmark based without noise and errors.\",\"id\":\"depth-anything/DA-2K\"}],\"demo\":{\"inputs\":[{\"filename\":\"depth-estimation-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"depth-estimation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Cutting-edge + depth estimation model.\",\"id\":\"depth-anything/Depth-Anything-V2-Large\"},{\"description\":\"A + strong monocular depth estimation model.\",\"id\":\"jingheya/lotus-depth-g-v1-0\"},{\"description\":\"A + depth estimation model that predicts depth in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + robust depth estimation model.\",\"id\":\"apple/DepthPro\"}],\"spaces\":[{\"description\":\"An + application that predicts the depth of an image and then reconstruct the 3D + model as voxels.\",\"id\":\"radames/dpt-depth-estimation-3d-voxels\"},{\"description\":\"An + application for bleeding-edge depth estimation.\",\"id\":\"akhaliq/depth-pro\"},{\"description\":\"An + application on cutting-edge depth estimation in videos.\",\"id\":\"tencent/DepthCrafter\"},{\"description\":\"A + human-centric depth estimation application.\",\"id\":\"facebook/sapiens-depth\"}],\"summary\":\"Depth + estimation is the task of predicting depth of the objects present in an image.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"depth-estimation\",\"label\":\"Depth + Estimation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"document-question-answering\":{\"datasets\":[{\"description\":\"Largest + document understanding dataset.\",\"id\":\"HuggingFaceM4/Docmatix\"},{\"description\":\"Dataset + from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry + Documents Library.\",\"id\":\"eliolio/docvqa\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"What + is the idea behind the consumer relations efficiency team?\",\"type\":\"text\"},{\"filename\":\"document-question-answering-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Balance + cost efficiency with quality customer service\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein + Similarity (ANLS). This metric is flexible to character regognition errors + and compares the predicted answer with the ground truth answer.\",\"id\":\"anls\"},{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. 
Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"}],\"models\":[{\"description\":\"A + robust document question answering model.\",\"id\":\"impira/layoutlm-document-qa\"},{\"description\":\"A + document question answering model specialized in invoices.\",\"id\":\"impira/layoutlm-invoices\"},{\"description\":\"A + special model for OCR-free document question answering.\",\"id\":\"microsoft/udop-large\"},{\"description\":\"A + powerful model for document question answering.\",\"id\":\"google/pix2struct-docvqa-large\"}],\"spaces\":[{\"description\":\"A + robust document question answering application.\",\"id\":\"impira/docquery\"},{\"description\":\"An + application that can answer questions from invoices.\",\"id\":\"impira/invoices\"},{\"description\":\"An + application to compare different document question answering models.\",\"id\":\"merve/compare_docvqa_models\"}],\"summary\":\"Document + Question Answering (also known as Document Visual Question Answering) is the + task of answering questions on document images. Document question answering + models take a (document, question) pair as input and return an answer in natural + language. Models usually rely on multi-modal features, combining text, position + of words (bounding-boxes) and image.\",\"widgetModels\":[\"impira/layoutlm-invoices\"],\"youtubeId\":\"\",\"id\":\"document-question-answering\",\"label\":\"Document + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"feature-extraction\":{\"datasets\":[{\"description\":\"Wikipedia + dataset containing cleaned articles of all languages. Can be used to train + `feature-extraction` models.\",\"id\":\"wikipedia\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"India, + officially the Republic of India, is a country in South Asia.\",\"type\":\"text\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"2.583383083343506\",\"2.757075071334839\",\"0.9023529887199402\"],[\"8.29393482208252\",\"1.1071064472198486\",\"2.03399395942688\"],[\"-0.7754912972450256\",\"-1.647324562072754\",\"-0.6113331913948059\"],[\"0.07087723910808563\",\"1.5942802429199219\",\"1.4610432386398315\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful feature extraction model for natural language processing tasks.\",\"id\":\"thenlper/gte-large\"},{\"description\":\"A + strong feature extraction model for retrieval.\",\"id\":\"Alibaba-NLP/gte-Qwen1.5-7B-instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to rank text feature extraction models based on a benchmark.\",\"id\":\"mteb/leaderboard\"},{\"description\":\"A + leaderboard to rank best feature extraction models based on human feedback.\",\"id\":\"mteb/arena\"}],\"summary\":\"Feature + extraction is the task of extracting features learnt in a model.\",\"widgetModels\":[\"facebook/bart-base\"],\"id\":\"feature-extraction\",\"label\":\"Feature + Extraction\",\"libraries\":[\"sentence-transformers\",\"transformers\",\"transformers.js\"]},\"fill-mask\":{\"datasets\":[{\"description\":\"A + common dataset that is used to train models for many languages.\",\"id\":\"wikipedia\"},{\"description\":\"A + large English dataset with text crawled from the web.\",\"id\":\"c4\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + barked at 
me\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"wolf\",\"score\":0.487},{\"label\":\"dog\",\"score\":0.061},{\"label\":\"cat\",\"score\":0.058},{\"label\":\"fox\",\"score\":0.047},{\"label\":\"squirrel\",\"score\":0.025}]}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"cross_entropy\"},{\"description\":\"Perplexity is the exponential + of the cross-entropy loss. It evaluates the probabilities assigned to the + next word by the model. Lower perplexity indicates better performance\",\"id\":\"perplexity\"}],\"models\":[{\"description\":\"The + famous BERT model.\",\"id\":\"google-bert/bert-base-uncased\"},{\"description\":\"A + multilingual model trained on 100 languages.\",\"id\":\"FacebookAI/xlm-roberta-base\"}],\"spaces\":[],\"summary\":\"Masked + language modeling is the task of masking some of the words in a sentence and + predicting which words should replace those masks. These models are useful + when we want to get a statistical understanding of the language in which the + model is trained in.\",\"widgetModels\":[\"distilroberta-base\"],\"youtubeId\":\"mqElG5QJWUg\",\"id\":\"fill-mask\",\"label\":\"Fill-Mask\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for image classification with images that belong to 100 classes.\",\"id\":\"cifar100\"},{\"description\":\"Dataset + consisting of images of garments.\",\"id\":\"fashion_mnist\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Egyptian + cat\",\"score\":0.514},{\"label\":\"Tabby cat\",\"score\":0.193},{\"label\":\"Tiger + cat\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + strong image classification model.\",\"id\":\"google/vit-base-patch16-224\"},{\"description\":\"A + robust image classification model.\",\"id\":\"facebook/deit-base-distilled-patch16-224\"},{\"description\":\"A + strong image classification model.\",\"id\":\"facebook/convnext-large-224\"}],\"spaces\":[{\"description\":\"An + application that classifies what a given image is about.\",\"id\":\"nielsr/perceiver-image-classification\"}],\"summary\":\"Image + classification is the task of assigning a label or class to an entire image. + Images are expected to have only one class for each image. 
Image classification + models take an image as input and return a prediction about which class the + image belongs to.\",\"widgetModels\":[\"google/vit-base-patch16-224\"],\"youtubeId\":\"tjAIM7BOYhw\",\"id\":\"image-classification\",\"label\":\"Image + Classification\",\"libraries\":[\"keras\",\"timm\",\"transformers\",\"transformers.js\"]},\"image-feature-extraction\":{\"datasets\":[{\"description\":\"ImageNet-1K + is a image classification dataset in which images are used to train image-feature-extraction + models.\",\"id\":\"imagenet-1k\"}],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"table\":[[\"Dimension + 1\",\"Dimension 2\",\"Dimension 3\"],[\"0.21236686408519745\",\"1.0919708013534546\",\"0.8512550592422485\"],[\"0.809657871723175\",\"-0.18544459342956543\",\"-0.7851548194885254\"],[\"1.3103108406066895\",\"-0.2479034662246704\",\"-0.9107287526130676\"],[\"1.8536205291748047\",\"-0.36419737339019775\",\"0.09717650711536407\"]],\"type\":\"tabular\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + powerful image feature extraction model.\",\"id\":\"timm/vit_large_patch14_dinov2.lvd142m\"},{\"description\":\"A + strong image feature extraction model.\",\"id\":\"nvidia/MambaVision-T-1K\"},{\"description\":\"A + robust image feature extraction model.\",\"id\":\"facebook/dino-vitb16\"},{\"description\":\"Strong + image feature extraction model made for information retrieval from documents.\",\"id\":\"vidore/colpali\"},{\"description\":\"Strong + image feature extraction model that can be used on images and documents.\",\"id\":\"OpenGVLab/InternViT-6B-448px-V1-2\"}],\"spaces\":[],\"summary\":\"Image + feature extraction is the task of extracting features learnt in a computer + vision model.\",\"widgetModels\":[],\"id\":\"image-feature-extraction\",\"label\":\"Image + Feature Extraction\",\"libraries\":[\"timm\",\"transformers\"]},\"image-segmentation\":{\"datasets\":[{\"description\":\"Scene + segmentation dataset.\",\"id\":\"scene_parse_150\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-segmentation-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-segmentation-output.png\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Average + Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for + each semantic class separately\",\"id\":\"Average Precision\"},{\"description\":\"Mean + Average Precision (mAP) is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"Intersection over Union (IoU) is the + overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic + classes\",\"id\":\"Mean Intersection over Union\"},{\"description\":\"AP\u03B1 + is the Average Precision at the IoU threshold of a \u03B1 value, for example, + AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + semantic segmentation model trained on ADE20k.\",\"id\":\"openmmlab/upernet-convnext-small\"},{\"description\":\"Background + removal model.\",\"id\":\"briaai/RMBG-1.4\"},{\"description\":\"A multipurpose + image segmentation model for high resolution images.\",\"id\":\"ZhengPeng7/BiRefNet\"},{\"description\":\"Powerful + human-centric image segmentation model.\",\"id\":\"facebook/sapiens-seg-1b\"},{\"description\":\"Panoptic + segmentation model trained on the COCO (common objects) dataset.\",\"id\":\"facebook/mask2former-swin-large-coco-panoptic\"}],\"spaces\":[{\"description\":\"A + semantic segmentation application that can predict unseen instances out of + the box.\",\"id\":\"facebook/ov-seg\"},{\"description\":\"One of the strongest + segmentation applications.\",\"id\":\"jbrinkma/segment-anything\"},{\"description\":\"A + human-centric segmentation model.\",\"id\":\"facebook/sapiens-pose\"},{\"description\":\"An + instance segmentation application to predict neuronal cell types from microscopy + images.\",\"id\":\"rashmi/sartorius-cell-instance-segmentation\"},{\"description\":\"An + application that segments videos.\",\"id\":\"ArtGAN/Segment-Anything-Video\"},{\"description\":\"An + panoptic segmentation application built for outdoor environments.\",\"id\":\"segments/panoptic-segment-anything\"}],\"summary\":\"Image + Segmentation divides an image into segments where each pixel in the image + is mapped to an object. This task has multiple variants such as instance segmentation, + panoptic segmentation and semantic segmentation.\",\"widgetModels\":[\"nvidia/segformer-b0-finetuned-ade-512-512\"],\"youtubeId\":\"dKE8SIt9C-w\",\"id\":\"image-segmentation\",\"label\":\"Image + Segmentation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"image-to-image\":{\"datasets\":[{\"description\":\"Synthetic + dataset, for image relighting\",\"id\":\"VIDIT\"},{\"description\":\"Multiple + images of celebrities, used for facial expression translation\",\"id\":\"huggan/CelebA-faces\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-image-input.jpeg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"image-to-image-output.png\",\"type\":\"img\"}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"Peak + Signal to Noise Ratio (PSNR) is an approximation of the human perception, + considering the ratio of the absolute intensity with respect to the variations. + Measured in dB, a high value indicates a high fidelity.\",\"id\":\"PSNR\"},{\"description\":\"Structural + Similarity Index (SSIM) is a perceptual metric which compares the luminance, + contrast and structure of two images. 
The values of SSIM range between -1 + and 1, and higher values indicate closer resemblance to the original image.\",\"id\":\"SSIM\"},{\"description\":\"Inception + Score (IS) is an analysis of the labels predicted by an image classification + model when presented with a sample of the generated images.\",\"id\":\"IS\"}],\"models\":[{\"description\":\"An + image-to-image model to improve image resolution.\",\"id\":\"fal/AuraSR-v2\"},{\"description\":\"A + model that increases the resolution of an image.\",\"id\":\"keras-io/super-resolution\"},{\"description\":\"A + model that creates a set of variations of the input image in the style of + DALL-E using Stable Diffusion.\",\"id\":\"lambdalabs/sd-image-variations-diffusers\"},{\"description\":\"A + model that generates images based on segments in the input image and the text + prompt.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"A + model that takes an image and an instruction to edit the image.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"spaces\":[{\"description\":\"Image + enhancer application for low light.\",\"id\":\"keras-io/low-light-image-enhancement\"},{\"description\":\"Style + transfer application.\",\"id\":\"keras-io/neural-style-transfer\"},{\"description\":\"An + application that generates images based on segment control.\",\"id\":\"mfidabel/controlnet-segment-anything\"},{\"description\":\"Image + generation application that takes image control and text prompt.\",\"id\":\"hysts/ControlNet\"},{\"description\":\"Colorize + any image using this app.\",\"id\":\"ioclab/brightness-controlnet\"},{\"description\":\"Edit + images with instructions.\",\"id\":\"timbrooks/instruct-pix2pix\"}],\"summary\":\"Image-to-image + is the task of transforming an input image through a variety of possible manipulations + and enhancements, such as super-resolution, image inpainting, colorization, + and more.\",\"widgetModels\":[\"stabilityai/stable-diffusion-2-inpainting\"],\"youtubeId\":\"\",\"id\":\"image-to-image\",\"label\":\"Image-to-Image\",\"libraries\":[\"diffusers\",\"transformers\",\"transformers.js\"]},\"image-text-to-text\":{\"datasets\":[{\"description\":\"Instructions + composed of image and text.\",\"id\":\"liuhaotian/LLaVA-Instruct-150K\"},{\"description\":\"Conversation + turns where questions involve image and text.\",\"id\":\"liuhaotian/LLaVA-Pretrain\"},{\"description\":\"A + collection of datasets made for model fine-tuning.\",\"id\":\"HuggingFaceM4/the_cauldron\"},{\"description\":\"Screenshots + of websites with their HTML/CSS codes.\",\"id\":\"HuggingFaceM4/WebSight\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-text-to-text-input.png\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"Describe the position of the bee in detail.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned + in the center of the flower, with its head and front legs sticking out.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + vision language model with great visual understanding and reasoning capabilities.\",\"id\":\"meta-llama/Llama-3.2-11B-Vision-Instruct\"},{\"description\":\"Cutting-edge + vision language models.\",\"id\":\"allenai/Molmo-7B-D-0924\"},{\"description\":\"Small + yet powerful model.\",\"id\":\"vikhyatk/moondream2\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"Qwen/Qwen2-VL-7B-Instruct\"},{\"description\":\"Strong + image-text-to-text model.\",\"id\":\"mistralai/Pixtral-12B-2409\"},{\"description\":\"Strong + image-text-to-text model focused on documents.\",\"id\":\"stepfun-ai/GOT-OCR2_0\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate vision language models.\",\"id\":\"opencompass/open_vlm_leaderboard\"},{\"description\":\"Vision + language models arena, where models are ranked by votes of users.\",\"id\":\"WildVision/vision-arena\"},{\"description\":\"Powerful + vision-language model assistant.\",\"id\":\"akhaliq/Molmo-7B-D-0924\"},{\"description\":\"An + image-text-to-text application focused on documents.\",\"id\":\"stepfun-ai/GOT_official_online_demo\"},{\"description\":\"An + application to compare outputs of different vision language models.\",\"id\":\"merve/compare_VLMs\"},{\"description\":\"An + application for chatting with an image-text-to-text model.\",\"id\":\"GanymedeNil/Qwen2-VL-7B\"}],\"summary\":\"Image-text-to-text + models take in an image and text prompt and output text. These models are + also called vision-language models, or VLMs. The difference from image-to-text + models is that these models take an additional text input, not restricting + the model to certain use cases like image captioning, and may also be trained + to accept a conversation as input.\",\"widgetModels\":[\"meta-llama/Llama-3.2-11B-Vision-Instruct\"],\"youtubeId\":\"IoGaGfU1CIg\",\"id\":\"image-text-to-text\",\"label\":\"Image-Text-to-Text\",\"libraries\":[\"transformers\"]},\"image-to-text\":{\"datasets\":[{\"description\":\"Dataset + from 12M image-text of Reddit\",\"id\":\"red_caps\"},{\"description\":\"Dataset + from 3.3M images of Google\",\"id\":\"datasets/conceptual_captions\"}],\"demo\":{\"inputs\":[{\"filename\":\"savanna.jpg\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Detailed + description\",\"content\":\"a herd of giraffes and zebras grazing in a field\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust image captioning model.\",\"id\":\"Salesforce/blip2-opt-2.7b\"},{\"description\":\"A + powerful and accurate image-to-text model that can also localize concepts + in images.\",\"id\":\"microsoft/kosmos-2-patch14-224\"},{\"description\":\"A + strong optical character recognition model.\",\"id\":\"facebook/nougat-base\"},{\"description\":\"A + powerful model that lets you have a conversation with the image.\",\"id\":\"llava-hf/llava-1.5-7b-hf\"}],\"spaces\":[{\"description\":\"An + application that compares various image captioning models.\",\"id\":\"nielsr/comparing-captioning-models\"},{\"description\":\"A + robust image captioning application.\",\"id\":\"flax-community/image-captioning\"},{\"description\":\"An + application that transcribes handwritings into text.\",\"id\":\"nielsr/TrOCR-handwritten\"},{\"description\":\"An + application that can caption images and answer questions about a given image.\",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An + 
application that can caption images and answer questions with a conversational + agent.\",\"id\":\"Salesforce/BLIP2\"},{\"description\":\"An image captioning + application that demonstrates the effect of noise on captions.\",\"id\":\"johko/capdec-image-captioning\"}],\"summary\":\"Image + to text models output a text from a given image. Image captioning or optical + character recognition can be considered as the most common applications of + image to text.\",\"widgetModels\":[\"Salesforce/blip-image-captioning-large\"],\"youtubeId\":\"\",\"id\":\"image-to-text\",\"label\":\"Image-to-Text\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"keypoint-detection\":{\"datasets\":[{\"description\":\"A + dataset of hand keypoints of over 500k examples.\",\"id\":\"Vincent-luo/hagrid-mediapipe-hands\"}],\"demo\":{\"inputs\":[{\"filename\":\"keypoint-detection-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"keypoint-detection-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust keypoint detection model.\",\"id\":\"magic-leap-community/superpoint\"},{\"description\":\"Strong + keypoint detection model used to detect human pose.\",\"id\":\"facebook/sapiens-pose-1b\"}],\"spaces\":[{\"description\":\"An + application that detects hand keypoints in real-time.\",\"id\":\"datasciencedojo/Hand-Keypoint-Detection-Realtime\"},{\"description\":\"An + application to try a universal keypoint detection model.\",\"id\":\"merve/SuperPoint\"}],\"summary\":\"Keypoint + detection is the task of identifying meaningful distinctive points or features + in an image.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"keypoint-detection\",\"label\":\"Keypoint + Detection\",\"libraries\":[\"transformers\"]},\"mask-generation\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"mask-generation-input.png\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"mask-generation-output.png\",\"type\":\"img\"}]},\"metrics\":[],\"models\":[{\"description\":\"Small + yet powerful mask generation model.\",\"id\":\"Zigeng/SlimSAM-uniform-50\"},{\"description\":\"Very + strong mask generation model.\",\"id\":\"facebook/sam2-hiera-large\"}],\"spaces\":[{\"description\":\"An + application that combines a mask generation model with a zero-shot object + detection model for text-guided image segmentation.\",\"id\":\"merve/OWLSAM2\"},{\"description\":\"An + application that compares the performance of a large and a small mask generation + model.\",\"id\":\"merve/slimsam\"},{\"description\":\"An application based + on an improved mask generation model.\",\"id\":\"SkalskiP/segment-anything-model-2\"},{\"description\":\"An + application to remove objects from videos using mask generation models.\",\"id\":\"SkalskiP/SAM_and_ProPainter\"}],\"summary\":\"Mask + generation is the task of generating masks that identify a specific object + or region of interest in a given image. 
Masks are often used in segmentation + tasks, where they provide a precise way to isolate the object of interest + for further processing or analysis.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"mask-generation\",\"label\":\"Mask + Generation\",\"libraries\":[\"transformers\"]},\"object-detection\":{\"datasets\":[{\"description\":\"Widely + used benchmark dataset for multiple vision tasks.\",\"id\":\"merve/coco2017\"},{\"description\":\"Multi-task + computer vision benchmark.\",\"id\":\"merve/pascal-voc\"}],\"demo\":{\"inputs\":[{\"filename\":\"object-detection-input.jpg\",\"type\":\"img\"}],\"outputs\":[{\"filename\":\"object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + object detection model pre-trained on the COCO 2017 dataset.\",\"id\":\"facebook/detr-resnet-50\"},{\"description\":\"Real-time + and accurate object detection model.\",\"id\":\"jameslahm/yolov10x\"},{\"description\":\"Fast + and accurate object detection model trained on COCO and Object365 datasets.\",\"id\":\"PekingU/rtdetr_r18vd_coco_o365\"}],\"spaces\":[{\"description\":\"Leaderboard + to compare various object detection models across several metrics.\",\"id\":\"hf-vision/object_detection_leaderboard\"},{\"description\":\"An + application that contains various object detection models to try from.\",\"id\":\"Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS\"},{\"description\":\"An + application that shows multiple cutting edge techniques for object detection + and tracking.\",\"id\":\"kadirnar/torchyolo\"},{\"description\":\"An object + tracking, segmentation and inpainting application.\",\"id\":\"VIPLab/Track-Anything\"},{\"description\":\"Very + fast object tracking application based on object detection.\",\"id\":\"merve/RT-DETR-tracking-coco\"}],\"summary\":\"Object + Detection models allow users to identify objects of certain defined classes. 
+ Object detection models receive an image as input and output the images with + bounding boxes and labels on detected objects.\",\"widgetModels\":[\"facebook/detr-resnet-50\"],\"youtubeId\":\"WdAeKSOpxhw\",\"id\":\"object-detection\",\"label\":\"Object + Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"video-classification\":{\"datasets\":[{\"description\":\"Benchmark + dataset used for video classification with videos that belong to 400 classes.\",\"id\":\"kinetics400\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-classification-input.gif\",\"type\":\"img\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Playing + Guitar\",\"score\":0.514},{\"label\":\"Playing Tennis\",\"score\":0.193},{\"label\":\"Cooking\",\"score\":0.068}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"google/vivit-b-16x2-kinetics400\"},{\"description\":\"Strong + Video Classification model trained on the Kinetics 400 dataset.\",\"id\":\"microsoft/xclip-base-patch32\"}],\"spaces\":[{\"description\":\"An + application that classifies video at different timestamps.\",\"id\":\"nateraw/lavila\"},{\"description\":\"An + application that classifies video.\",\"id\":\"fcakyon/video-classification\"}],\"summary\":\"Video + classification is the task of assigning a label or class to an entire video. + Videos are expected to have only one class for each video. Video classification + models take a video as input and return a prediction about which class the + video belongs to.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"video-classification\",\"label\":\"Video + Classification\",\"libraries\":[\"transformers\"]},\"question-answering\":{\"datasets\":[{\"description\":\"A + famous question answering dataset based on English articles from Wikipedia.\",\"id\":\"squad_v2\"},{\"description\":\"A + dataset of aggregated anonymized actual queries issued to the Google search + engine.\",\"id\":\"natural_questions\"}],\"demo\":{\"inputs\":[{\"label\":\"Question\",\"content\":\"Which + name is also used to describe the Amazon rainforest in English?\",\"type\":\"text\"},{\"label\":\"Context\",\"content\":\"The + Amazon rainforest, also known in English as Amazonia or the Amazon Jungle\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"Amazonia\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Exact + Match is a metric based on the strict character match of the predicted answer + and the right answer. For answers predicted correctly, the Exact Match will + be 1. Even if only one character is different, Exact Match will be 0\",\"id\":\"exact-match\"},{\"description\":\" + The F1-Score metric is useful if we value both false positives and false negatives + equally. 
The F1-Score is calculated on each word in the predicted sequence + against the correct answer\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust baseline model for most question answering domains.\",\"id\":\"deepset/roberta-base-squad2\"},{\"description\":\"Small + yet robust model that can answer questions.\",\"id\":\"distilbert/distilbert-base-cased-distilled-squad\"},{\"description\":\"A + special model that can answer questions from tables.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that can answer a long question from Wikipedia.\",\"id\":\"deepset/wikipedia-assistant\"}],\"summary\":\"Question + Answering models can retrieve the answer to a question from a given text, + which is useful for searching for an answer in a document. Some question answering + models can generate answers without context!\",\"widgetModels\":[\"deepset/roberta-base-squad2\"],\"youtubeId\":\"ajPx5LwJD-I\",\"id\":\"question-answering\",\"label\":\"Question + Answering\",\"libraries\":[\"adapter-transformers\",\"allennlp\",\"transformers\",\"transformers.js\"]},\"reinforcement-learning\":{\"datasets\":[{\"description\":\"A + curation of widely used datasets for Data Driven Deep Reinforcement Learning + (D4RL)\",\"id\":\"edbeeching/decision_transformer_gym_replay\"}],\"demo\":{\"inputs\":[{\"label\":\"State\",\"content\":\"Red + traffic light, pedestrians are about to pass.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Action\",\"content\":\"Stop + the car.\",\"type\":\"text\"},{\"label\":\"Next State\",\"content\":\"Yellow + light, pedestrians have crossed.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Accumulated + reward across all time steps discounted by a factor that ranges between 0 + and 1 and determines how much the agent optimizes for future relative to immediate + rewards. Measures how good is the policy ultimately found by a given algorithm + considering uncertainty over the future.\",\"id\":\"Discounted Total Reward\"},{\"description\":\"Average + return obtained after running the policy for a certain number of evaluation + episodes. As opposed to total reward, mean reward considers how much reward + a given algorithm receives while learning.\",\"id\":\"Mean Reward\"},{\"description\":\"Measures + how good a given algorithm is after a predefined time. Some algorithms may + be guaranteed to converge to optimal behavior across many time steps. 
However, + an agent that reaches an acceptable level of optimality after a given time + horizon may be preferable to one that ultimately reaches optimality but takes + a long time.\",\"id\":\"Level of Performance After Some Time\"}],\"models\":[{\"description\":\"A + Reinforcement Learning model trained on expert data from the Gym Hopper environment\",\"id\":\"edbeeching/decision-transformer-gym-hopper-expert\"},{\"description\":\"A + PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and + the RL Zoo.\",\"id\":\"HumanCompatibleAI/ppo-seals-CartPole-v0\"}],\"spaces\":[{\"description\":\"An + application for a cute puppy agent learning to catch a stick.\",\"id\":\"ThomasSimonini/Huggy\"},{\"description\":\"An + application to play Snowball Fight with a reinforcement learning agent.\",\"id\":\"ThomasSimonini/SnowballFight\"}],\"summary\":\"Reinforcement + learning is the computational approach of learning from action by interacting + with an environment through trial and error and receiving rewards (negative + or positive) as feedback\",\"widgetModels\":[],\"youtubeId\":\"q0BiUn5LiBc\",\"id\":\"reinforcement-learning\",\"label\":\"Reinforcement + Learning\",\"libraries\":[\"transformers\",\"stable-baselines3\",\"ml-agents\",\"sample-factory\"]},\"sentence-similarity\":{\"datasets\":[{\"description\":\"Bing + queries with relevant passages from various web sources.\",\"id\":\"ms_marco\"}],\"demo\":{\"inputs\":[{\"label\":\"Source + sentence\",\"content\":\"Machine learning is so easy.\",\"type\":\"text\"},{\"label\":\"Sentences + to compare to\",\"content\":\"Deep learning is so straightforward.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"This + is so difficult, like rocket science.\",\"type\":\"text\"},{\"label\":\"\",\"content\":\"I + can't believe how much I struggled with this.\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Deep + learning is so straightforward.\",\"score\":0.623},{\"label\":\"This is so + difficult, like rocket science.\",\"score\":0.413},{\"label\":\"I can't believe + how much I struggled with this.\",\"score\":0.256}]}]},\"metrics\":[{\"description\":\"Reciprocal + Rank is a measure used to rank the relevancy of documents given a set of documents. + Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, + if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal + Rank is 1\",\"id\":\"Mean Reciprocal Rank\"},{\"description\":\"The similarity + of the embeddings is evaluated mainly on cosine similarity. It is calculated + as the cosine of the angle between two vectors. 
It is particularly useful + when your texts are not the same length\",\"id\":\"Cosine Similarity\"}],\"models\":[{\"description\":\"This + model works well for sentences and paragraphs and can be used for clustering/grouping + and semantic searches.\",\"id\":\"sentence-transformers/all-mpnet-base-v2\"},{\"description\":\"A + multilingual robust sentence similarity model..\",\"id\":\"BAAI/bge-m3\"}],\"spaces\":[{\"description\":\"An + application that leverages sentence similarity to answer questions from YouTube + videos.\",\"id\":\"Gradio-Blocks/Ask_Questions_To_YouTube_Videos\"},{\"description\":\"An + application that retrieves relevant PubMed abstracts for a given online article + which can be used as further references.\",\"id\":\"Gradio-Blocks/pubmed-abstract-retriever\"},{\"description\":\"An + application that leverages sentence similarity to summarize text.\",\"id\":\"nickmuchi/article-text-summarizer\"},{\"description\":\"A + guide that explains how Sentence Transformers can be used for semantic search.\",\"id\":\"sentence-transformers/Sentence_Transformers_for_semantic_search\"}],\"summary\":\"Sentence + Similarity is the task of determining how similar two texts are. Sentence + similarity models convert input texts into vectors (embeddings) that capture + semantic information and calculate how close (similar) they are between them. + This task is particularly useful for information retrieval and clustering/grouping.\",\"widgetModels\":[\"BAAI/bge-small-en-v1.5\"],\"youtubeId\":\"VCZq5AkbNEU\",\"id\":\"sentence-similarity\",\"label\":\"Sentence + Similarity\",\"libraries\":[\"sentence-transformers\",\"spacy\",\"transformers.js\"]},\"summarization\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"News + articles in five different languages along with their summaries. Widely used + for benchmarking multilingual summarization models.\",\"id\":\"mlsum\"},{\"description\":\"English + conversations and their summaries. Useful for benchmarking conversational + agents.\",\"id\":\"samsum\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building, and the tallest structure in Paris. Its base is square, measuring + 125 metres (410 ft) on each side. It was the first structure to reach a height + of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest + free-standing structure in France after the Millau Viaduct.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"The + tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey + building. It was the first structure to reach a height of 300 metres.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"The + generated sequence is compared against its summary, and the overlap of tokens + are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers + to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.\",\"id\":\"rouge\"}],\"models\":[{\"description\":\"A + strong summarization model trained on English news articles. 
Excels at generating + factual summaries.\",\"id\":\"facebook/bart-large-cnn\"},{\"description\":\"A + summarization model trained on medical articles.\",\"id\":\"Falconsai/medical_summarization\"}],\"spaces\":[{\"description\":\"An + application that can summarize long paragraphs.\",\"id\":\"pszemraj/summarize-long-text\"},{\"description\":\"A + much needed summarization application for terms and conditions.\",\"id\":\"ml6team/distilbart-tos-summarizer-tosdr\"},{\"description\":\"An + application that summarizes long documents.\",\"id\":\"pszemraj/document-summarization\"},{\"description\":\"An + application that can detect errors in abstractive summarization.\",\"id\":\"ml6team/post-processing-summarization\"}],\"summary\":\"Summarization + is the task of producing a shorter version of a document while preserving + its important information. Some models can extract text from the original + input, while other models can generate entirely new text.\",\"widgetModels\":[\"facebook/bart-large-cnn\"],\"youtubeId\":\"yHnr5Dk2zCI\",\"id\":\"summarization\",\"label\":\"Summarization\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"table-question-answering\":{\"datasets\":[{\"description\":\"The + WikiTableQuestions dataset is a large-scale dataset for the task of question + answering on semi-structured tables.\",\"id\":\"wikitablequestions\"},{\"description\":\"WikiSQL + is a dataset of 80654 hand-annotated examples of questions and SQL queries + distributed across 24241 tables from Wikipedia.\",\"id\":\"wikisql\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Rank\",\"Name\",\"No.of + reigns\",\"Combined days\"],[\"1\",\"lou Thesz\",\"3\",\"3749\"],[\"2\",\"Ric + Flair\",\"8\",\"3103\"],[\"3\",\"Harley Race\",\"7\",\"1799\"]],\"type\":\"tabular\"},{\"label\":\"Question\",\"content\":\"What + is the number of reigns for Harley Race?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"7\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Checks + whether the predicted answer(s) is the same as the ground-truth answer(s).\",\"id\":\"Denotation + Accuracy\"}],\"models\":[{\"description\":\"A table question answering model + that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL + query on a given table.\",\"id\":\"microsoft/tapex-base\"},{\"description\":\"A + robust table question answering model.\",\"id\":\"google/tapas-base-finetuned-wtq\"}],\"spaces\":[{\"description\":\"An + application that answers questions based on table CSV files.\",\"id\":\"katanaml/table-query\"}],\"summary\":\"Table + Question Answering (Table QA) is the answering a question about an information + on a given table.\",\"widgetModels\":[\"google/tapas-base-finetuned-wtq\"],\"id\":\"table-question-answering\",\"label\":\"Table + Question Answering\",\"libraries\":[\"transformers\"]},\"tabular-classification\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Glucose\",\"Blood + Pressure \",\"Skin 
Thickness\",\"Insulin\",\"BMI\"],[\"148\",\"72\",\"35\",\"0\",\"33.6\"],[\"150\",\"50\",\"30\",\"0\",\"35.1\"],[\"141\",\"60\",\"29\",\"1\",\"39.2\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"Diabetes\"],[\"1\"],[\"1\"],[\"0\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"Breast + cancer prediction model based on decision trees.\",\"id\":\"scikit-learn/cancer-prediction-trees\"}],\"spaces\":[{\"description\":\"An + application that can predict defective products on a production line.\",\"id\":\"scikit-learn/tabular-playground\"},{\"description\":\"An + application that compares various tabular classification techniques on different + datasets.\",\"id\":\"scikit-learn/classification\"}],\"summary\":\"Tabular + classification is the task of classifying a target category (a group) based + on set of attributes.\",\"widgetModels\":[\"scikit-learn/tabular-playground\"],\"youtubeId\":\"\",\"id\":\"tabular-classification\",\"label\":\"Tabular + Classification\",\"libraries\":[\"sklearn\"]},\"tabular-regression\":{\"datasets\":[{\"description\":\"A + comprehensive curation of datasets covering all benchmarks.\",\"id\":\"inria-soda/tabular-benchmark\"}],\"demo\":{\"inputs\":[{\"table\":[[\"Car + Name\",\"Horsepower\",\"Weight\"],[\"ford torino\",\"140\",\"3,449\"],[\"amc + hornet\",\"97\",\"2,774\"],[\"toyota corolla\",\"65\",\"1,773\"]],\"type\":\"tabular\"}],\"outputs\":[{\"table\":[[\"MPG + (miles per gallon)\"],[\"17\"],[\"18\"],[\"31\"]],\"type\":\"tabular\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"mse\"},{\"description\":\"Coefficient + of determination (or R-squared) is a measure of how well the model fits the + data. Higher R-squared is considered a better fit.\",\"id\":\"r-squared\"}],\"models\":[{\"description\":\"Fish + weight prediction based on length measurements and species.\",\"id\":\"scikit-learn/Fish-Weight\"}],\"spaces\":[{\"description\":\"An + application that can predict weight of a fish based on set of attributes.\",\"id\":\"scikit-learn/fish-weight-prediction\"}],\"summary\":\"Tabular + regression is the task of predicting a numerical value given a set of attributes.\",\"widgetModels\":[\"scikit-learn/Fish-Weight\"],\"youtubeId\":\"\",\"id\":\"tabular-regression\",\"label\":\"Tabular + Regression\",\"libraries\":[\"sklearn\"]},\"text-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"A + text classification dataset used to benchmark natural language inference models\",\"id\":\"stanfordnlp/snli\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love Hugging Face!\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"POSITIVE\",\"score\":0.9},{\"label\":\"NEUTRAL\",\"score\":0.1},{\"label\":\"NEGATIVE\",\"score\":0}]}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"The + F1 metric is the harmonic mean of the precision and recall. 
It can be calculated + as: F1 = 2 * (precision * recall) / (precision + recall)\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust model trained for sentiment analysis.\",\"id\":\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"},{\"description\":\"A + sentiment analysis model specialized in financial sentiment.\",\"id\":\"ProsusAI/finbert\"},{\"description\":\"A + sentiment analysis model specialized in analyzing tweets.\",\"id\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\"},{\"description\":\"A + model that can classify languages.\",\"id\":\"papluca/xlm-roberta-base-language-detection\"},{\"description\":\"A + model that can classify text generation attacks.\",\"id\":\"meta-llama/Prompt-Guard-86M\"}],\"spaces\":[{\"description\":\"An + application that can classify financial sentiment.\",\"id\":\"IoannisTr/Tech_Stocks_Trading_Assistant\"},{\"description\":\"A + dashboard that contains various text classification tasks.\",\"id\":\"miesnerjacob/Multi-task-NLP\"},{\"description\":\"An + application that analyzes user reviews in healthcare.\",\"id\":\"spacy/healthsea-demo\"}],\"summary\":\"Text + Classification is the task of assigning a label or class to a given text. + Some use cases are sentiment analysis, natural language inference, and assessing + grammatical correctness.\",\"widgetModels\":[\"distilbert/distilbert-base-uncased-finetuned-sst-2-english\"],\"youtubeId\":\"leNG9fN9FQU\",\"id\":\"text-classification\",\"label\":\"Text + Classification\",\"libraries\":[\"adapter-transformers\",\"setfit\",\"spacy\",\"transformers\",\"transformers.js\"]},\"text-generation\":{\"datasets\":[{\"description\":\"A + large multilingual dataset of text crawled from the web.\",\"id\":\"mc4\"},{\"description\":\"Diverse + open-source data consisting of 22 smaller high-quality datasets. It was used + to train GPT-Neo.\",\"id\":\"the_pile\"},{\"description\":\"Truly open-source, + curated and cleaned dialogue dataset.\",\"id\":\"HuggingFaceH4/ultrachat_200k\"},{\"description\":\"An + instruction dataset with preference ratings on responses.\",\"id\":\"openbmb/UltraFeedback\"},{\"description\":\"A + large synthetic dataset for alignment of text generation models.\",\"id\":\"argilla/magpie-ultra-v0.1\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Once + upon a time,\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Once + upon a time, we knew that our ancestors were on the verge of extinction. The + great explorers and poets of the Old World, from Alexander the Great to Chaucer, + are dead and gone. A good many of our ancient explorers and poets have\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"Cross + Entropy is a metric that calculates the difference between two probability + distributions. Each probability distribution is the distribution of predicted + words\",\"id\":\"Cross Entropy\"},{\"description\":\"The Perplexity metric + is the exponential of the cross-entropy loss. It evaluates the probabilities + assigned to the next word by the model. 
Lower perplexity indicates better + performance\",\"id\":\"Perplexity\"}],\"models\":[{\"description\":\"A text-generation + model trained to follow instructions.\",\"id\":\"google/gemma-2-2b-it\"},{\"description\":\"Very + powerful text generation model trained to follow instructions.\",\"id\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\"},{\"description\":\"Small + yet powerful text generation model.\",\"id\":\"microsoft/Phi-3-mini-4k-instruct\"},{\"description\":\"A + very powerful model that can solve mathematical problems.\",\"id\":\"AI-MO/NuminaMath-7B-TIR\"},{\"description\":\"Strong + text generation model to follow instructions.\",\"id\":\"Qwen/Qwen2.5-7B-Instruct\"},{\"description\":\"Very + strong open-source large language model.\",\"id\":\"nvidia/Llama-3.1-Nemotron-70B-Instruct\"}],\"spaces\":[{\"description\":\"A + leaderboard to compare different open-source text generation models based + on various benchmarks.\",\"id\":\"open-llm-leaderboard/open_llm_leaderboard\"},{\"description\":\"A + leaderboard for comparing chain-of-thought performance of models.\",\"id\":\"logikon/open_cot_leaderboard\"},{\"description\":\"An + text generation based application based on a very powerful LLaMA2 model.\",\"id\":\"ysharma/Explore_llamav2_with_TGI\"},{\"description\":\"An + text generation based application to converse with Zephyr model.\",\"id\":\"HuggingFaceH4/zephyr-chat\"},{\"description\":\"A + leaderboard that ranks text generation models based on blind votes from people.\",\"id\":\"lmsys/chatbot-arena-leaderboard\"},{\"description\":\"An + chatbot to converse with a very powerful text generation model.\",\"id\":\"mlabonne/phixtral-chat\"}],\"summary\":\"Generating + text is the task of generating new text given another text. These models can, + for example, fill in incomplete text or paraphrase.\",\"widgetModels\":[\"mistralai/Mistral-Nemo-Instruct-2407\"],\"youtubeId\":\"e9gNEAlsOvU\",\"id\":\"text-generation\",\"label\":\"Text + Generation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-image\":{\"datasets\":[{\"description\":\"RedCaps + is a large-scale dataset of 12M image-text pairs collected from Reddit.\",\"id\":\"red_caps\"},{\"description\":\"Conceptual + Captions is a dataset consisting of ~3.3M images annotated with captions.\",\"id\":\"conceptual_captions\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"A + city above clouds, pastel colors, Victorian style\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"image.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Inception Score (IS) measure assesses diversity and meaningfulness. It uses + a generated image sample to predict its label. A higher score signifies more + diverse and meaningful images.\",\"id\":\"IS\"},{\"description\":\"The Fr\xE9chet + Inception Distance (FID) calculates the distance between distributions between + synthetic and real samples. A lower FID score indicates better similarity + between the distributions of real and generated images.\",\"id\":\"FID\"},{\"description\":\"R-precision + assesses how the generated image aligns with the provided text description. + It uses the generated images as queries to retrieve relevant text descriptions. + The top 'r' relevant descriptions are selected and used to calculate R-precision + as r/R, where 'R' is the number of ground truth descriptions associated with + the generated images. 
A higher R-precision value indicates a better model.\",\"id\":\"R-Precision\"}],\"models\":[{\"description\":\"One + of the most powerful image generation models that can generate realistic outputs.\",\"id\":\"black-forest-labs/FLUX.1-dev\"},{\"description\":\"A + powerful yet fast image generation model.\",\"id\":\"latent-consistency/lcm-lora-sdxl\"},{\"description\":\"Text-to-image + model for photorealistic generation.\",\"id\":\"Kwai-Kolors/Kolors\"},{\"description\":\"A + powerful text-to-image model.\",\"id\":\"stabilityai/stable-diffusion-3-medium-diffusers\"}],\"spaces\":[{\"description\":\"A + powerful text-to-image application.\",\"id\":\"stabilityai/stable-diffusion-3-medium\"},{\"description\":\"A + text-to-image application to generate comics.\",\"id\":\"jbilcke-hf/ai-comic-factory\"},{\"description\":\"An + application to match multiple custom image generation models.\",\"id\":\"multimodalart/flux-lora-lab\"},{\"description\":\"A + powerful yet very fast image generation application.\",\"id\":\"latent-consistency/lcm-lora-for-sdxl\"},{\"description\":\"A + gallery to explore various text-to-image models.\",\"id\":\"multimodalart/LoraTheExplorer\"},{\"description\":\"An + application for `text-to-image`, `image-to-image` and image inpainting.\",\"id\":\"ArtGAN/Stable-Diffusion-ControlNet-WebUI\"},{\"description\":\"An + application to generate realistic images given photos of a person and a prompt.\",\"id\":\"InstantX/InstantID\"}],\"summary\":\"Text-to-image + is the task of generating images from input text. These pipelines can also + be used to modify and edit images based on text prompts.\",\"widgetModels\":[\"black-forest-labs/FLUX.1-dev\"],\"youtubeId\":\"\",\"id\":\"text-to-image\",\"label\":\"Text-to-Image\",\"libraries\":[\"diffusers\"]},\"text-to-speech\":{\"canonicalId\":\"text-to-audio\",\"datasets\":[{\"description\":\"10K + hours of multi-speaker English dataset.\",\"id\":\"parler-tts/mls_eng_10k\"},{\"description\":\"Multi-speaker + English dataset.\",\"id\":\"mythicinfinity/libritts_r\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"I + love audio models on the Hub!\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"audio.wav\",\"type\":\"audio\"}]},\"metrics\":[{\"description\":\"The + Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated + speech.\",\"id\":\"mel cepstral distortion\"}],\"models\":[{\"description\":\"A + powerful TTS model.\",\"id\":\"parler-tts/parler-tts-large-v1\"},{\"description\":\"A + massively multi-lingual TTS model.\",\"id\":\"coqui/XTTS-v2\"},{\"description\":\"Robust + TTS model.\",\"id\":\"metavoiceio/metavoice-1B-v0.1\"},{\"description\":\"A + prompt based, powerful TTS model.\",\"id\":\"parler-tts/parler_tts_mini_v0.1\"}],\"spaces\":[{\"description\":\"An + application for generate highly realistic, multilingual speech.\",\"id\":\"suno/bark\"},{\"description\":\"An + application on XTTS, a voice generation model that lets you clone voices into + different languages.\",\"id\":\"coqui/xtts\"},{\"description\":\"An application + that generates speech in different styles in English and Chinese.\",\"id\":\"mrfakename/E2-F5-TTS\"},{\"description\":\"An + application that synthesizes speech for diverse speaker prompts.\",\"id\":\"parler-tts/parler_tts_mini\"}],\"summary\":\"Text-to-Speech + (TTS) is the task of generating natural sounding speech given text input. 
+ TTS models can be extended to have a single model that generates speech for + multiple speakers and multiple languages.\",\"widgetModels\":[\"suno/bark\"],\"youtubeId\":\"NW62DpzJ274\",\"id\":\"text-to-speech\",\"label\":\"Text-to-Speech\",\"libraries\":[\"espnet\",\"tensorflowtts\",\"transformers\",\"transformers.js\"]},\"text-to-video\":{\"datasets\":[{\"description\":\"Microsoft + Research Video to Text is a large-scale dataset for open domain video captioning\",\"id\":\"iejMac/CLIP-MSR-VTT\"},{\"description\":\"UCF101 + Human Actions dataset consists of 13,320 video clips from YouTube, with 101 + classes.\",\"id\":\"quchenyuan/UCF101-ZIP\"},{\"description\":\"A high-quality + dataset for human action recognition in YouTube videos.\",\"id\":\"nateraw/kinetics\"},{\"description\":\"A + dataset of video clips of humans performing pre-defined basic actions with + everyday objects.\",\"id\":\"HuggingFaceM4/something_something_v2\"},{\"description\":\"This + dataset consists of text-video pairs and contains noisy samples with irrelevant + video descriptions\",\"id\":\"HuggingFaceM4/webvid\"},{\"description\":\"A + dataset of short Flickr videos for the temporal localization of events with + descriptions.\",\"id\":\"iejMac/CLIP-DiDeMo\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"Darth + Vader is surfing on the waves.\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"text-to-video-output.gif\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"Inception + Score uses an image classification model that predicts class labels and evaluates + how distinct and diverse the images are. A higher score indicates better video + generation.\",\"id\":\"is\"},{\"description\":\"Frechet Inception Distance + uses an image classification model to obtain image embeddings. The metric + compares mean and standard deviation of the embeddings of real and generated + images. A smaller score indicates better video generation.\",\"id\":\"fid\"},{\"description\":\"Frechet + Video Distance uses a model that captures coherence for changes in frames + and the quality of each frame. A smaller score indicates better video generation.\",\"id\":\"fvd\"},{\"description\":\"CLIPSIM + measures similarity between video frames and text using an image-text similarity + model. A higher score indicates better video generation.\",\"id\":\"clipsim\"}],\"models\":[{\"description\":\"A + strong model for consistent video generation.\",\"id\":\"rain1011/pyramid-flow-sd3\"},{\"description\":\"A + robust model for text-to-video generation.\",\"id\":\"VideoCrafter/VideoCrafter2\"},{\"description\":\"A + cutting-edge text-to-video generation model.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"}],\"spaces\":[{\"description\":\"An + application that generates video from text.\",\"id\":\"VideoCrafter/VideoCrafter\"},{\"description\":\"Consistent + video generation application.\",\"id\":\"TIGER-Lab/T2V-Turbo-V2\"},{\"description\":\"A + cutting edge video generation application.\",\"id\":\"Pyramid-Flow/pyramid-flow\"}],\"summary\":\"Text-to-video + models can be used in any application that requires generating consistent + sequence of images from text. 
\",\"widgetModels\":[],\"id\":\"text-to-video\",\"label\":\"Text-to-Video\",\"libraries\":[\"diffusers\"]},\"token-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset useful to benchmark named entity recognition models.\",\"id\":\"eriktks/conll2003\"},{\"description\":\"A + multilingual dataset of Wikipedia articles annotated for named entity recognition + in over 150 different languages.\",\"id\":\"unimelb-nlp/wikiann\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"text\":\"My + name is Omar and I live in Z\xFCrich.\",\"tokens\":[{\"type\":\"PERSON\",\"start\":11,\"end\":15},{\"type\":\"GPE\",\"start\":30,\"end\":36}],\"type\":\"text-with-tokens\"}]},\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"\",\"id\":\"recall\"},{\"description\":\"\",\"id\":\"precision\"},{\"description\":\"\",\"id\":\"f1\"}],\"models\":[{\"description\":\"A + robust performance model to identify people, locations, organizations and + names of miscellaneous entities.\",\"id\":\"dslim/bert-base-NER\"},{\"description\":\"A + strong model to identify people, locations, organizations and names in multiple + languages.\",\"id\":\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"},{\"description\":\"A + token classification model specialized on medical entity recognition.\",\"id\":\"blaze999/Medical-NER\"},{\"description\":\"Flair + models are typically the state of the art in named entity recognition tasks.\",\"id\":\"flair/ner-english\"}],\"spaces\":[{\"description\":\"An + application that can recognizes entities, extracts noun chunks and recognizes + various linguistic features of each token.\",\"id\":\"spacy/gradio_pipeline_visualizer\"}],\"summary\":\"Token + classification is a natural language understanding task in which a label is + assigned to some tokens in a text. Some popular token classification subtasks + are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models + could be trained to identify specific entities in a text, such as dates, individuals + and places; and PoS tagging would identify, for example, which words in a + text are verbs, nouns, and punctuation marks.\",\"widgetModels\":[\"FacebookAI/xlm-roberta-large-finetuned-conll03-english\"],\"youtubeId\":\"wVHdVlPScxA\",\"id\":\"token-classification\",\"label\":\"Token + Classification\",\"libraries\":[\"adapter-transformers\",\"flair\",\"spacy\",\"span-marker\",\"stanza\",\"transformers\",\"transformers.js\"]},\"translation\":{\"canonicalId\":\"text2text-generation\",\"datasets\":[{\"description\":\"A + dataset of copyright-free books translated into 16 different languages.\",\"id\":\"Helsinki-NLP/opus_books\"},{\"description\":\"An + example of translation between programming languages. This dataset consists + of functions in Java and C#.\",\"id\":\"google/code_x_glue_cc_code_to_code_trans\"}],\"demo\":{\"inputs\":[{\"label\":\"Input\",\"content\":\"My + name is Omar and I live in Z\xFCrich.\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Output\",\"content\":\"Mein + Name ist Omar und ich wohne in Z\xFCrich.\",\"type\":\"text\"}]},\"metrics\":[{\"description\":\"BLEU + score is calculated by counting the number of shared single or subsequent + tokens between the generated sequence and the reference. Subsequent n tokens + are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram + refers to token pairs and n-grams refer to n subsequent tokens. 
The score + ranges from 0 to 1, where 1 means the translation perfectly matched and 0 + did not match at all\",\"id\":\"bleu\"},{\"description\":\"\",\"id\":\"sacrebleu\"}],\"models\":[{\"description\":\"Very + powerful model that can translate many languages between each other, especially + low-resource languages.\",\"id\":\"facebook/nllb-200-1.3B\"},{\"description\":\"A + general-purpose Transformer that can be used to translate from English to + German, French, or Romanian.\",\"id\":\"google-t5/t5-base\"}],\"spaces\":[{\"description\":\"An + application that can translate between 100 languages.\",\"id\":\"Iker/Translate-100-languages\"},{\"description\":\"An + application that can translate between many languages.\",\"id\":\"Geonmo/nllb-translation-demo\"}],\"summary\":\"Translation + is the task of converting text from one language to another.\",\"widgetModels\":[\"facebook/mbart-large-50-many-to-many-mmt\"],\"youtubeId\":\"1JvfrvZgi6c\",\"id\":\"translation\",\"label\":\"Translation\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"unconditional-image-generation\":{\"datasets\":[{\"description\":\"The + CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with + 600 images per class.\",\"id\":\"cifar100\"},{\"description\":\"Multiple images + of celebrities, used for facial expression translation.\",\"id\":\"CelebA\"}],\"demo\":{\"inputs\":[{\"label\":\"Seed\",\"content\":\"42\",\"type\":\"text\"},{\"label\":\"Number + of images to generate:\",\"content\":\"4\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"unconditional-image-generation-output.jpeg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + inception score (IS) evaluates the quality of generated images. It measures + the diversity of the generated images (the model predictions are evenly distributed + across all possible labels) and their 'distinction' or 'sharpness' (the model + confidently predicts a single label for each image).\",\"id\":\"Inception + score (IS)\"},{\"description\":\"The Fr\xE9chet Inception Distance (FID) evaluates + the quality of images created by a generative model by calculating the distance + between feature vectors for real and generated images.\",\"id\":\"Fre\u0107het + Inception Distance (FID)\"}],\"models\":[{\"description\":\"High-quality image + generation model trained on the CIFAR-10 dataset. It synthesizes images of + the ten classes presented in the dataset using diffusion probabilistic models, + a class of latent variable models inspired by considerations from nonequilibrium + thermodynamics.\",\"id\":\"google/ddpm-cifar10-32\"},{\"description\":\"High-quality + image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes + images of faces using diffusion probabilistic models, a class of latent variable + models inspired by considerations from nonequilibrium thermodynamics.\",\"id\":\"google/ddpm-celebahq-256\"}],\"spaces\":[{\"description\":\"An + application that can generate realistic faces.\",\"id\":\"CompVis/celeba-latent-diffusion\"}],\"summary\":\"Unconditional + image generation is the task of generating images with no condition in any + context (like a prompt text or another image). 
Once trained, the model will + create images that resemble its training data distribution.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"unconditional-image-generation\",\"label\":\"Unconditional + Image Generation\",\"libraries\":[\"diffusers\"]},\"video-text-to-text\":{\"datasets\":[{\"description\":\"Multiple-choice + questions and answers about videos.\",\"id\":\"lmms-lab/Video-MME\"},{\"description\":\"A + dataset of instructions and question-answer pairs about videos.\",\"id\":\"lmms-lab/VideoChatGPT\"},{\"description\":\"Large + video understanding dataset.\",\"id\":\"HuggingFaceFV/finevideo\"}],\"demo\":{\"inputs\":[{\"filename\":\"video-text-to-text-input.gif\",\"type\":\"img\"},{\"label\":\"Text + Prompt\",\"content\":\"What is happening in this video?\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Answer\",\"content\":\"The + video shows a series of images showing a fountain with water jets and a variety + of colorful flowers and butterflies in the background.\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"A + robust video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/llava-onevision-qwen2-72b-ov-hf\"},{\"description\":\"Large + and powerful video-text-to-text model that can take in image and video inputs.\",\"id\":\"llava-hf/LLaVA-NeXT-Video-34B-hf\"}],\"spaces\":[{\"description\":\"An + application to chat with a video-text-to-text model.\",\"id\":\"llava-hf/video-llava\"},{\"description\":\"A + leaderboard for various video-text-to-text models.\",\"id\":\"opencompass/openvlm_video_leaderboard\"}],\"summary\":\"Video-text-to-text + models take in a video and a text prompt and output text. These models are + also called video-language models.\",\"widgetModels\":[\"\"],\"youtubeId\":\"\",\"id\":\"video-text-to-text\",\"label\":\"Video-Text-to-Text\",\"libraries\":[\"transformers\"]},\"visual-question-answering\":{\"datasets\":[{\"description\":\"A + widely used dataset containing questions (with answers) about images.\",\"id\":\"Graphcore/vqa\"},{\"description\":\"A + dataset to benchmark visual reasoning based on text in images.\",\"id\":\"facebook/textvqa\"}],\"demo\":{\"inputs\":[{\"filename\":\"elephant.jpeg\",\"type\":\"img\"},{\"label\":\"Question\",\"content\":\"What + is in this image?\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"elephant\",\"score\":0.97},{\"label\":\"elephants\",\"score\":0.06},{\"label\":\"animal\",\"score\":0.003}]}]},\"isPlaceholder\":false,\"metrics\":[{\"description\":\"\",\"id\":\"accuracy\"},{\"description\":\"Measures + how much a predicted answer differs from the ground truth based on the difference + in their semantic meaning.\",\"id\":\"wu-palmer similarity\"}],\"models\":[{\"description\":\"A + visual question answering model trained to convert charts and plots to text.\",\"id\":\"google/deplot\"},{\"description\":\"A + visual question answering model trained for mathematical reasoning and chart + derendering from images.\",\"id\":\"google/matcha-base\"},{\"description\":\"A + strong visual question answering that answers questions from book covers.\",\"id\":\"google/pix2struct-ocrvqa-large\"}],\"spaces\":[{\"description\":\"An + application that compares visual question answering models across different + tasks.\",\"id\":\"merve/pix2struct\"},{\"description\":\"An application that + can answer questions based on images.\",\"id\":\"nielsr/vilt-vqa\"},{\"description\":\"An + application that can caption images and answer questions about a given 
image. + \",\"id\":\"Salesforce/BLIP\"},{\"description\":\"An application that can + caption images and answer questions about a given image. \",\"id\":\"vumichien/Img2Prompt\"}],\"summary\":\"Visual + Question Answering is the task of answering open-ended questions based on + an image. They output natural language responses to natural language questions.\",\"widgetModels\":[\"dandelin/vilt-b32-finetuned-vqa\"],\"youtubeId\":\"\",\"id\":\"visual-question-answering\",\"label\":\"Visual + Question Answering\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-classification\":{\"datasets\":[{\"description\":\"A + widely used dataset used to benchmark multiple variants of text classification.\",\"id\":\"nyu-mll/glue\"},{\"description\":\"The + Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced + collection of 433k sentence pairs annotated with textual entailment information.\",\"id\":\"nyu-mll/multi_nli\"},{\"description\":\"FEVER + is a publicly available dataset for fact extraction and verification against + textual sources.\",\"id\":\"fever/fever\"}],\"demo\":{\"inputs\":[{\"label\":\"Text + Input\",\"content\":\"Dune is the best movie ever.\",\"type\":\"text\"},{\"label\":\"Candidate + Labels\",\"content\":\"CINEMA, ART, MUSIC\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"CINEMA\",\"score\":0.9},{\"label\":\"ART\",\"score\":0.1},{\"label\":\"MUSIC\",\"score\":0}]}]},\"metrics\":[],\"models\":[{\"description\":\"Powerful + zero-shot text classification model.\",\"id\":\"facebook/bart-large-mnli\"},{\"description\":\"Powerful + zero-shot multilingual text classification model that can accomplish multiple + tasks.\",\"id\":\"MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7\"}],\"spaces\":[],\"summary\":\"Zero-shot + text classification is a task in natural language processing where a model + is trained on a set of labeled examples but is then able to classify new examples + from previously unseen classes.\",\"widgetModels\":[\"facebook/bart-large-mnli\"],\"id\":\"zero-shot-classification\",\"label\":\"Zero-Shot + Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-image-classification\":{\"datasets\":[{\"description\":\"\",\"id\":\"\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-classification-input.jpeg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"type\":\"chart\",\"data\":[{\"label\":\"Cat\",\"score\":0.664},{\"label\":\"Dog\",\"score\":0.329},{\"label\":\"Bird\",\"score\":0.008}]}]},\"metrics\":[{\"description\":\"Computes + the number of times the correct label appears in top K labels predicted\",\"id\":\"top-K + accuracy\"}],\"models\":[{\"description\":\"Robust image classification model + trained on publicly available image-caption data.\",\"id\":\"openai/clip-vit-base-patch16\"},{\"description\":\"Strong + zero-shot image classification model.\",\"id\":\"google/siglip-so400m-patch14-224\"},{\"description\":\"Small + yet powerful zero-shot image classification model that can run on edge devices.\",\"id\":\"apple/MobileCLIP-S1-OpenCLIP\"},{\"description\":\"Strong + image classification model for biomedical domain.\",\"id\":\"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224\"}],\"spaces\":[{\"description\":\"An + application that leverages zero-shot image classification to find best captions + to generate an image. 
\",\"id\":\"pharma/CLIP-Interrogator\"},{\"description\":\"An + application to compare different zero-shot image classification models. \",\"id\":\"merve/compare_clip_siglip\"}],\"summary\":\"Zero-shot + image classification is the task of classifying previously unseen classes + during training of a model.\",\"widgetModels\":[\"google/siglip-so400m-patch14-224\"],\"youtubeId\":\"\",\"id\":\"zero-shot-image-classification\",\"label\":\"Zero-Shot + Image Classification\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"zero-shot-object-detection\":{\"datasets\":[],\"demo\":{\"inputs\":[{\"filename\":\"zero-shot-object-detection-input.jpg\",\"type\":\"img\"},{\"label\":\"Classes\",\"content\":\"cat, + dog, bird\",\"type\":\"text\"}],\"outputs\":[{\"filename\":\"zero-shot-object-detection-output.jpg\",\"type\":\"img\"}]},\"metrics\":[{\"description\":\"The + Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It + is calculated for each class separately\",\"id\":\"Average Precision\"},{\"description\":\"The + Mean Average Precision (mAP) metric is the overall average of the AP values\",\"id\":\"Mean + Average Precision\"},{\"description\":\"The AP\u03B1 metric is the Average + Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75\",\"id\":\"AP\u03B1\"}],\"models\":[{\"description\":\"Solid + zero-shot object detection model.\",\"id\":\"IDEA-Research/grounding-dino-base\"},{\"description\":\"Cutting-edge + zero-shot object detection model.\",\"id\":\"google/owlv2-base-patch16-ensemble\"}],\"spaces\":[{\"description\":\"A + demo to try the state-of-the-art zero-shot object detection model, OWLv2.\",\"id\":\"merve/owlv2\"},{\"description\":\"A + demo that combines a zero-shot object detection and mask generation model + for zero-shot segmentation.\",\"id\":\"merve/OWLSAM\"}],\"summary\":\"Zero-shot + object detection is a computer vision task to detect objects and their classes + in images, without any prior training or knowledge of the classes. 
Zero-shot + object detection models receive an image as input, as well as a list of candidate + classes, and output the bounding boxes and labels where the objects have been + detected.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"zero-shot-object-detection\",\"label\":\"Zero-Shot + Object Detection\",\"libraries\":[\"transformers\",\"transformers.js\"]},\"text-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"Descriptive + captions for 3D objects in Objaverse.\",\"id\":\"tiange/Cap3D\"}],\"demo\":{\"inputs\":[{\"label\":\"Prompt\",\"content\":\"a + cat statue\",\"type\":\"text\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"text-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Text-to-3D + mesh model by OpenAI\",\"id\":\"openai/shap-e\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Text-to-3D + demo with mesh outputs.\",\"id\":\"hysts/Shap-E\"},{\"description\":\"Text/image-to-3D + demo with splat outputs.\",\"id\":\"ashawkey/LGM\"}],\"summary\":\"Text-to-3D + models take in text input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"text-to-3d\",\"label\":\"Text-to-3D\",\"libraries\":[\"diffusers\"]},\"image-to-3d\":{\"datasets\":[{\"description\":\"A + large dataset of over 10 million 3D objects.\",\"id\":\"allenai/objaverse-xl\"},{\"description\":\"A + dataset of isolated object images for evaluating image-to-3D models.\",\"id\":\"dylanebert/iso3d\"}],\"demo\":{\"inputs\":[{\"filename\":\"image-to-3d-image-input.png\",\"type\":\"img\"}],\"outputs\":[{\"label\":\"Result\",\"content\":\"image-to-3d-3d-output-filename.glb\",\"type\":\"text\"}]},\"metrics\":[],\"models\":[{\"description\":\"Fast + image-to-3D mesh model by Tencent.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Fast + image-to-3D mesh model by StabilityAI\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"A + scaled up image-to-3D mesh model derived from TripoSR.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Generative + 3D gaussian splatting model.\",\"id\":\"ashawkey/LGM\"}],\"spaces\":[{\"description\":\"Leaderboard + to evaluate image-to-3D models.\",\"id\":\"dylanebert/3d-arena\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"TencentARC/InstantMesh\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"stabilityai/TripoSR\"},{\"description\":\"Image-to-3D + demo with mesh outputs.\",\"id\":\"hwjiang/Real3D\"},{\"description\":\"Image-to-3D + demo with splat outputs.\",\"id\":\"dylanebert/LGM-mini\"}],\"summary\":\"Image-to-3D + models take in image input and produce 3D output.\",\"widgetModels\":[],\"youtubeId\":\"\",\"id\":\"image-to-3d\",\"label\":\"Image-to-3D\",\"libraries\":[\"diffusers\"]}}" + headers: + Access-Control-Allow-Origin: + - https://huggingface.co + Access-Control-Expose-Headers: + - X-Repo-Commit,X-Request-Id,X-Error-Code,X-Error-Message,X-Total-Count,ETag,Link,Accept-Ranges,Content-Range + Connection: + - keep-alive + Content-Length: + - '73726' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 31 Oct 2024 14:22:30 GMT + ETag: + - W/"11ffe-LYeRHzSmA1Ja2XTx5UWNPygcpnc" + Referrer-Policy: + - strict-origin-when-cross-origin + Vary: + - Origin + Via: + - 1.1 3734acf137431fb00caf3c73f9eb75fa.cloudfront.net (CloudFront) + X-Amz-Cf-Id: + - 
9oTRZ3Kad0LCfJBvC1is2D-1tFtYr6Kof8vF7Aqg5ZYNU4xPzH-rIw== + X-Amz-Cf-Pop: + - CCU50-P1 + X-Cache: + - Miss from cloudfront + X-Powered-By: + - huggingface-moon + X-Request-Id: + - Root=1-672392a6-745874890ba7bfeb2dc2f431;d1943f1d-c463-48d4-b807-02bbb0da0f46 + cross-origin-opener-policy: + - same-origin + status: + code: 200 + message: OK +- request: + body: null + headers: + user-agent: + - unknown/None; hf_hub/0.26.2; python/3.9.12; torch/2.4.1 + method: POST + uri: https://api-inference.huggingface.co/models/facebook/bart-large-mnli + response: + body: + string: '{"sequence":"A new model offers an explanation for how the Galilean + satellites formed around the solar system''slargest world. Konstantin Batygin + did not set out to solve one of the solar system''s most puzzling mysteries + when he went for a run up a hill in Nice, France.","labels":["scientific discovery"],"scores":[0.9829296469688416]}' + headers: + Access-Control-Allow-Credentials: + - 'true' + Connection: + - keep-alive + Content-Length: + - '335' + Content-Type: + - application/json + Date: + - Thu, 31 Oct 2024 14:22:31 GMT + Vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.178' + x-compute-type: + - cache + x-request-id: + - ihyEmjP1pdi3qL66XdFxO + x-sha: + - d7645e127eaf1aefc7862fd59a17a5aa8558b8ce + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/huggingface/huggingface_test.py b/tests/integrations/huggingface/huggingface_test.py new file mode 100644 index 00000000000..6803b9da5c5 --- /dev/null +++ b/tests/integrations/huggingface/huggingface_test.py @@ -0,0 +1,748 @@ +import asyncio +import os + +import pytest + +from weave.integrations.integration_utilities import op_name_from_ref + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_chat_completion(client): + from huggingface_hub import InferenceClient + + huggingface_client = InferenceClient( + api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ) + image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" + huggingface_client.chat_completion( + model="meta-llama/Llama-3.2-11B-Vision-Instruct", + messages=[ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "Describe this image in one sentence."}, + ], + } + ], + max_tokens=500, + seed=42, + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.chat_completion" + ) + output = call.output + assert output.choices[0].finish_reason == "stop" + assert output.choices[0].index == 0 + assert "statue of liberty" in output.choices[0].message.content.lower() + assert output.choices[0].message.role == "assistant" + assert output.model == "meta-llama/Llama-3.2-11B-Vision-Instruct" + assert output.usage.prompt_tokens == 44 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_chat_completion_stream(client): + from huggingface_hub import InferenceClient + + huggingface_client = InferenceClient( + api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ) + image_url = 
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" + [ + r + for r in huggingface_client.chat_completion( + model="meta-llama/Llama-3.2-11B-Vision-Instruct", + messages=[ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + { + "type": "text", + "text": "Describe this image in one sentence.", + }, + ], + } + ], + max_tokens=500, + seed=42, + stream=True, + ) + ] + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.chat_completion" + ) + output = call.output + assert output.choices[0].index == 0 + assert "statue of liberty" in output.choices[0].message.content.lower() + assert output.choices[0].message.role == "assistant" + assert output.model == "meta-llama/Llama-3.2-11B-Vision-Instruct" + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_chat_completion_async(client): + from huggingface_hub import AsyncInferenceClient + + huggingface_client = AsyncInferenceClient( + api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ) + image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" + asyncio.run( + huggingface_client.chat_completion( + model="meta-llama/Llama-3.2-11B-Vision-Instruct", + messages=[ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + { + "type": "text", + "text": "Describe this image in one sentence.", + }, + ], + } + ], + max_tokens=500, + seed=42, + ) + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.AsyncInferenceClient.chat_completion" + ) + output = call.output + assert output.choices[0].finish_reason == "stop" + assert output.choices[0].index == 0 + assert "statue of liberty" in output.choices[0].message.content.lower() + assert output.choices[0].message.role == "assistant" + assert output.model == "meta-llama/Llama-3.2-11B-Vision-Instruct" + assert output.usage.prompt_tokens == 44 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_document_question_answering(client): + from huggingface_hub import InferenceClient + + image_url = "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" + InferenceClient( + api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).document_question_answering( + image=image_url, + model="impira/layoutlm-document-qa", + question="What is the invoice number?", + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.document_question_answering" + ) + output = call.output + assert output[0].answer == "us-001" + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_document_question_answering_async(client): + from huggingface_hub import 
AsyncInferenceClient
+
+    image_url = "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
+    asyncio.run(
+        AsyncInferenceClient(
+            api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY")
+        ).document_question_answering(
+            image=image_url,
+            model="impira/layoutlm-document-qa",
+            question="What is the invoice number?",
+        )
+    )
+
+    calls = list(client.calls())
+    assert len(calls) == 1
+
+    call = calls[0]
+    assert call.started_at < call.ended_at
+    assert (
+        op_name_from_ref(call.op_name)
+        == "huggingface_hub.AsyncInferenceClient.document_question_answering"
+    )
+    output = call.output
+    assert output[0].answer == "us-001"
+
+
+@pytest.mark.skip_clickhouse_client
+@pytest.mark.vcr(
+    filter_headers=["authorization", "x-api-key"],
+    allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"],
+)
+def test_huggingface_visual_question_answering(client):
+    from huggingface_hub import InferenceClient
+
+    image_url = (
+        "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
+    )
+    InferenceClient(
+        api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY")
+    ).visual_question_answering(
+        image=image_url,
+        question="What is the animal doing?",
+    )
+
+    calls = list(client.calls())
+    assert len(calls) == 1
+
+    call = calls[0]
+    assert call.started_at < call.ended_at
+    assert (
+        op_name_from_ref(call.op_name)
+        == "huggingface_hub.InferenceClient.visual_question_answering"
+    )
+    output = call.output
+    assert output[0].answer == "laying down"
+
+
+@pytest.mark.skip_clickhouse_client
+@pytest.mark.vcr(
+    filter_headers=["authorization", "x-api-key"],
+    allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"],
+)
+def test_huggingface_visual_question_answering_async(client):
+    from huggingface_hub import AsyncInferenceClient
+
+    image_url = (
+        "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
+    )
+    asyncio.run(
+        AsyncInferenceClient(
+            api_key=os.environ.get("HUGGINGFACE_API_KEY", "DUMMY_API_KEY")
+        ).visual_question_answering(
+            image=image_url,
+            question="What is the animal doing?",
+        )
+    )
+
+    calls = list(client.calls())
+    assert len(calls) == 1
+
+    call = calls[0]
+    assert call.started_at < call.ended_at
+    assert (
+        op_name_from_ref(call.op_name)
+        == "huggingface_hub.AsyncInferenceClient.visual_question_answering"
+    )
+    output = call.output
+    assert output[0].answer == "laying down"
+
+
+@pytest.mark.skip_clickhouse_client
+@pytest.mark.vcr(
+    filter_headers=["authorization", "x-api-key"],
+    allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"],
+)
+def test_huggingface_fill_mask(client):
+    from huggingface_hub import InferenceClient
+
+    InferenceClient(
+        api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY")
+    ).fill_mask("The goal of life is <mask>.")
+
+    calls = list(client.calls())
+    assert len(calls) == 1
+
+    call = calls[0]
+    assert call.started_at < call.ended_at
+    assert op_name_from_ref(call.op_name) == "huggingface_hub.InferenceClient.fill_mask"
+    output = call.output
+    assert output[0].token_str in output[0].sequence
+    assert output[0].score > 0
+
+
+@pytest.mark.skip_clickhouse_client
+@pytest.mark.vcr(
+    filter_headers=["authorization", "x-api-key"],
+    allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"],
+)
+def test_huggingface_fill_mask_async(client):
+    from huggingface_hub import AsyncInferenceClient
+
+    asyncio.run(
+        AsyncInferenceClient(
+            api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY")
+        ).fill_mask("The goal of life is <mask>.")
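+        # Output shape (per huggingface_hub's FillMaskOutputElement type):
+        # each element carries `sequence` (the completed sentence),
+        # `token_str` (the predicted fill), and `score`, which is what the
+        # assertions below check.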
+ ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.AsyncInferenceClient.fill_mask" + ) + output = call.output + assert output[0].token_str in output[0].sequence + assert output[0].score > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_question_answering(client): + from huggingface_hub import InferenceClient + + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).question_answering( + question="What's my name?", context="My name is Clara and I live in Berkeley." + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.question_answering" + ) + output = call.output + assert output.answer == "Clara" + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_question_answering_async(client): + from huggingface_hub import AsyncInferenceClient + + asyncio.run( + AsyncInferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).question_answering( + question="What's my name?", + context="My name is Clara and I live in Berkeley.", + ) + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.AsyncInferenceClient.question_answering" + ) + output = call.output + assert output.answer == "Clara" + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_sentence_similarity(client): + from huggingface_hub import InferenceClient + + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).sentence_similarity( + "Machine learning is so easy.", + other_sentences=[ + "Deep learning is so straightforward.", + "This is so difficult, like rocket science.", + "I can't believe how much I struggled with this.", + ], + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.sentence_similarity" + ) + output = call.output + for item in output: + assert item > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_sentence_similarity_async(client): + from huggingface_hub import AsyncInferenceClient + + asyncio.run( + AsyncInferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).sentence_similarity( + "Machine learning is so easy.", + other_sentences=[ + "Deep learning is so straightforward.", + "This is so difficult, like rocket science.", + "I can't believe how much I struggled with this.", + ], + ) + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == 
"huggingface_hub.AsyncInferenceClient.sentence_similarity" + ) + output = call.output + for item in output: + assert item > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_summarization(client): + from huggingface_hub import InferenceClient + + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).summarization( + "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum." + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.summarization" + ) + output = call.output + assert "Lorem Ipsum" in output.summary_text + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_table_question_answering(client): + from huggingface_hub import InferenceClient + + query = "How many stars does the transformers repository have?" + table = { + "Repository": ["Transformers", "Datasets", "Tokenizers"], + "Stars": ["36542", "4512", "3934"], + } + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).table_question_answering(table, query, model="google/tapas-base-finetuned-wtq") + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.table_question_answering" + ) + output = call.output + assert output.answer == "AVERAGE > 36542" + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_table_question_answering_async(client): + from huggingface_hub import AsyncInferenceClient + + query = "How many stars does the transformers repository have?" 
+ table = { + "Repository": ["Transformers", "Datasets", "Tokenizers"], + "Stars": ["36542", "4512", "3934"], + } + asyncio.run( + AsyncInferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).table_question_answering( + table, query, model="google/tapas-base-finetuned-wtq" + ) + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.AsyncInferenceClient.table_question_answering" + ) + output = call.output + assert output.answer == "AVERAGE > 36542" + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_text_classification(client): + from huggingface_hub import InferenceClient + + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).text_classification("I like you") + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.text_classification" + ) + output = call.output + assert output[0].label == "POSITIVE" + assert output[0].score > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_text_classification_async(client): + from huggingface_hub import AsyncInferenceClient + + asyncio.run( + AsyncInferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).text_classification("I like you") + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.AsyncInferenceClient.text_classification" + ) + output = call.output + assert output[0].label == "POSITIVE" + assert output[0].score > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_token_classification(client): + from huggingface_hub import InferenceClient + + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).token_classification( + "My name is Sarah Jessica Parker but you can call me Jessica" + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.token_classification" + ) + output = call.output + assert output[0].word == "Sarah Jessica Parker" + assert output[0].score > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_token_classification_async(client): + from huggingface_hub import AsyncInferenceClient + + asyncio.run( + AsyncInferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).token_classification( + "My name is Sarah Jessica Parker but you can call me Jessica" + ) + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == 
"huggingface_hub.AsyncInferenceClient.token_classification" + ) + output = call.output + assert output[0].word == "Sarah Jessica Parker" + assert output[0].score > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_translation(client): + from huggingface_hub import InferenceClient + + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).translation( + "My name is Wolfgang and I live in Berlin", model="Helsinki-NLP/opus-mt-en-fr" + ) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) == "huggingface_hub.InferenceClient.translation" + ) + output = call.output + assert "Wolfgang" in output.translation_text + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_zero_shot_classification(client): + from huggingface_hub import InferenceClient + + text = ( + "A new model offers an explanation for how the Galilean satellites formed around the solar system's" + "largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling" + " mysteries when he went for a run up a hill in Nice, France." + ) + labels = ["scientific discovery"] + InferenceClient( + api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY") + ).zero_shot_classification(text, labels) + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + assert ( + op_name_from_ref(call.op_name) + == "huggingface_hub.InferenceClient.zero_shot_classification" + ) + output = call.output + assert output[0].label == "scientific discovery" + assert output[0].score > 0 + + +@pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization", "x-api-key"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) +def test_huggingface_zero_shot_classification_async(client): + from huggingface_hub import AsyncInferenceClient + + text = ( + "A new model offers an explanation for how the Galilean satellites formed around the solar system's" + "largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling" + " mysteries when he went for a run up a hill in Nice, France." 
+    )
+    labels = ["scientific discovery"]
+    asyncio.run(
+        AsyncInferenceClient(
+            api_key=os.getenv("HUGGINGFACE_API_KEY", "DUMMY_API_KEY")
+        ).zero_shot_classification(text, labels)
+    )
+
+    calls = list(client.calls())
+    assert len(calls) == 1
+
+    call = calls[0]
+    assert call.started_at < call.ended_at
+    assert (
+        op_name_from_ref(call.op_name)
+        == "huggingface_hub.AsyncInferenceClient.zero_shot_classification"
+    )
+    output = call.output
+    assert output[0].label == "scientific discovery"
+    assert output[0].score > 0
diff --git a/weave/integrations/huggingface/__init__.py b/weave/integrations/huggingface/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/weave/integrations/huggingface/huggingface_sdk.py b/weave/integrations/huggingface/huggingface_sdk.py
new file mode 100644
index 00000000000..fa445deb6d8
--- /dev/null
+++ b/weave/integrations/huggingface/huggingface_sdk.py
@@ -0,0 +1,284 @@
+import importlib
+from functools import wraps
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union
+
+import weave
+from weave.trace.op_extensions.accumulator import add_accumulator
+from weave.trace.patcher import MultiPatcher, SymbolPatcher
+
+if TYPE_CHECKING:
+    from huggingface_hub.inference._generated.types.chat_completion import (
+        ChatCompletionOutput,
+        ChatCompletionStreamOutput,
+    )
+
+
+def huggingface_accumulator(
+    acc: Optional[Union["ChatCompletionStreamOutput", "ChatCompletionOutput"]],
+    value: "ChatCompletionStreamOutput",
+) -> "ChatCompletionOutput":
+    """Fold streamed chat-completion chunks into one ChatCompletionOutput."""
+    from huggingface_hub.inference._generated.types.chat_completion import (
+        ChatCompletionOutput,
+        ChatCompletionOutputComplete,
+        ChatCompletionOutputMessage,
+        ChatCompletionOutputUsage,
+    )
+
+    # First chunk: seed the accumulator from the initial deltas.
+    if acc is None:
+        acc = ChatCompletionOutput(
+            choices=[
+                ChatCompletionOutputComplete(
+                    index=choice.index,
+                    message=ChatCompletionOutputMessage(
+                        content=choice.delta.content or "",
+                        role=choice.delta.role or "assistant",
+                    ),
+                    finish_reason=None,
+                )
+                for choice in value.choices
+            ],
+            created=value.created,
+            id=value.id,
+            model=value.model,
+            system_fingerprint=value.system_fingerprint,
+            usage=value.usage,
+        )
+        return acc
+
+    # Accumulate subsequent chunks
+    for idx, value_choice in enumerate(value.choices):
+        acc.choices[idx].message.content += value_choice.delta.content or ""
+
+    if acc.usage is None:
+        acc.usage = ChatCompletionOutputUsage(
+            completion_tokens=0, prompt_tokens=0, total_tokens=0
+        )
+    # For some reason, value.usage always comes back as `None`; this might be
+    # a bug in `huggingface_hub.InferenceClient`. If it is ever populated,
+    # the accumulation below can be re-enabled:
+    # if value.usage is not None:
+    #     acc.usage.completion_tokens += value.usage.completion_tokens
+    #     acc.usage.prompt_tokens += value.usage.prompt_tokens
+    #     acc.usage.total_tokens += value.usage.total_tokens
+    return acc
+
+
+def huggingface_wrapper_sync(name: str) -> Callable[[Callable], Callable]:
+    def wrapper(fn: Callable) -> Callable:
+        op = weave.op()(fn)
+        op.name = name
+        return add_accumulator(
+            op,  # type: ignore
+            make_accumulator=lambda inputs: huggingface_accumulator,
+            should_accumulate=lambda inputs: isinstance(inputs, dict)
+            and bool(inputs.get("stream")),
+        )
+
+    return wrapper
+
+
+def huggingface_wrapper_async(name: str) -> Callable[[Callable], Callable]:
+    def wrapper(fn: Callable) -> Callable:
+        def _fn_wrapper(fn: Callable) -> Callable:
+            @wraps(fn)
+            async def _async_wrapper(*args: Any, **kwargs: Any) -> Any:
+                return await fn(*args, **kwargs)
+
+            return _async_wrapper
+
+        # We need this extra coroutine layer so we can check whether `stream`
+        # is used.
+        op = 
weave.op()(_fn_wrapper(fn)) + op.name = name # type: ignore + return add_accumulator( + op, # type: ignore + make_accumulator=lambda inputs: huggingface_accumulator, + should_accumulate=lambda inputs: isinstance(inputs, dict) + and bool(inputs.get("stream")), + ) + + return wrapper + + +huggingface_patcher = MultiPatcher( + [ + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.chat_completion", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.chat_completion" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.chat_completion", + huggingface_wrapper_async( + name="huggingface_hub.AsyncInferenceClient.chat_completion" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.document_question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.document_question_answering" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.document_question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.AsyncInferenceClient.document_question_answering" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.visual_question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.visual_question_answering" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.visual_question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.AsyncInferenceClient.visual_question_answering" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.fill_mask", + huggingface_wrapper_sync(name="huggingface_hub.InferenceClient.fill_mask"), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.fill_mask", + huggingface_wrapper_sync( + name="huggingface_hub.AsyncInferenceClient.fill_mask" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.question_answering" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.AsyncInferenceClient.question_answering" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.sentence_similarity", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.sentence_similarity" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.sentence_similarity", + huggingface_wrapper_sync( + name="huggingface_hub.AsyncInferenceClient.sentence_similarity" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.summarization", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.summarization" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "AsyncInferenceClient.summarization", + huggingface_wrapper_sync( + name="huggingface_hub.AsyncInferenceClient.summarization" + ), + ), + SymbolPatcher( + lambda: importlib.import_module("huggingface_hub"), + "InferenceClient.table_question_answering", + huggingface_wrapper_sync( + name="huggingface_hub.InferenceClient.table_question_answering" + ), + ), + 
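+        # Note the asymmetry above: only chat_completion is wrapped with
+        # huggingface_wrapper_async, since it is the one streaming endpoint
+        # and the extra coroutine layer lets `should_accumulate` inspect the
+        # `stream` kwarg. The remaining Async* methods go through
+        # huggingface_wrapper_sync.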
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "AsyncInferenceClient.table_question_answering",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.AsyncInferenceClient.table_question_answering"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "InferenceClient.text_classification",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.InferenceClient.text_classification"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "AsyncInferenceClient.text_classification",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.AsyncInferenceClient.text_classification"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "InferenceClient.token_classification",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.InferenceClient.token_classification"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "AsyncInferenceClient.token_classification",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.AsyncInferenceClient.token_classification"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "InferenceClient.translation",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.InferenceClient.translation"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "AsyncInferenceClient.translation",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.AsyncInferenceClient.translation"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "InferenceClient.zero_shot_classification",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.InferenceClient.zero_shot_classification"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "AsyncInferenceClient.zero_shot_classification",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.AsyncInferenceClient.zero_shot_classification"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "InferenceClient.text_to_image",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.InferenceClient.text_to_image"
+            ),
+        ),
+        SymbolPatcher(
+            lambda: importlib.import_module("huggingface_hub"),
+            "AsyncInferenceClient.text_to_image",
+            huggingface_wrapper_sync(
+                name="huggingface_hub.AsyncInferenceClient.text_to_image"
+            ),
+        ),
+    ]
+)
diff --git a/weave/trace/autopatch.py b/weave/trace/autopatch.py
index 3a5dca14556..689865b6828 100644
--- a/weave/trace/autopatch.py
+++ b/weave/trace/autopatch.py
@@ -14,6 +14,7 @@ def autopatch() -> None:
         google_genai_patcher,
     )
     from weave.integrations.groq.groq_sdk import groq_patcher
+    from weave.integrations.huggingface.huggingface_sdk import huggingface_patcher
     from weave.integrations.instructor.instructor_sdk import instructor_patcher
     from weave.integrations.langchain.langchain import langchain_patcher
     from weave.integrations.litellm.litellm import litellm_patcher
@@ -35,6 +36,7 @@ def autopatch() -> None:
     cerebras_patcher.attempt_patch()
     cohere_patcher.attempt_patch()
     google_genai_patcher.attempt_patch()
+    huggingface_patcher.attempt_patch()
     notdiamond_patcher.attempt_patch()
     vertexai_patcher.attempt_patch()
 
@@ -48,6 +50,7 @@ def reset_autopatch() -> None:
         google_genai_patcher,
     )
     from weave.integrations.groq.groq_sdk import groq_patcher
+    from weave.integrations.huggingface.huggingface_sdk import huggingface_patcher
     from weave.integrations.instructor.instructor_sdk import instructor_patcher
     from weave.integrations.langchain.langchain import langchain_patcher
     from weave.integrations.litellm.litellm import litellm_patcher
@@ -69,5 +72,6 @@ def reset_autopatch() -> None:
     cerebras_patcher.undo_patch()
     cohere_patcher.undo_patch()
     google_genai_patcher.undo_patch()
+    huggingface_patcher.undo_patch()
     notdiamond_patcher.undo_patch()
     vertexai_patcher.undo_patch()
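
Usage note: once this patcher is registered in autopatch(), calls made through huggingface_hub's InferenceClient are traced automatically after weave.init(). A minimal sketch of what that looks like from the caller's side (the project name below is illustrative and not part of this change; the model and parameters mirror the test cassettes):

    import os

    import weave
    from huggingface_hub import InferenceClient

    # weave.init() runs autopatch(), which wraps InferenceClient.chat_completion
    # (and the other patched methods) in weave ops, so the call below is logged
    # as "huggingface_hub.InferenceClient.chat_completion" with no extra code.
    weave.init("huggingface-demo")  # hypothetical project name

    client = InferenceClient(api_key=os.environ["HUGGINGFACE_API_KEY"])
    response = client.chat_completion(
        messages=[
            {"role": "user", "content": "Describe this image in one sentence."}
        ],
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        max_tokens=500,
        seed=42,
    )
    print(response.choices[0].message.content)

For streaming calls (stream=True), should_accumulate detects the flag in the inputs, and huggingface_accumulator merges the ChatCompletionStreamOutput chunks into a single ChatCompletionOutput before the trace is finalized.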