Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【PaddlePaddle Hackathon 4】No.63 : add embedding fp16 test #51321

Merged
merged 18 commits into from
Aug 4, 2023
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 55 additions & 2 deletions test/legacy_test/test_lookup_table_v2_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
import unittest

import numpy as np
from eager_op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
from op import Operator

import paddle
Expand Down Expand Up @@ -47,11 +47,17 @@ class TestLookupTableOp(OpTest):
def setUp(self):
self.op_type = "lookup_table_v2"
self.python_api = paddle.nn.functional.embedding
table = np.random.random((17, 31)).astype("float64")
self.init_dtype()

table = np.random.random((17, 31)).astype(self.dtype)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个算子低精度实现存在Bug,对于奇数元素数量的场景处理存在问题。先在低精度的case里面,换一个偶数元素的shape吧,我们后续修复这个算子问题。

另外还有bf16的单测

ids = np.random.randint(0, 17, 4).astype(self.id_dtype())

self.inputs = {'W': table, 'Ids': ids}
self.outputs = {'Out': table[ids]}

def init_dtype(self):
    # Base test runs in full double precision; low-precision subclasses
    # (e.g. the FP16 test) override this hook before the table is built.
    self.dtype = "float64"

def id_dtype(self):
    # Dtype used for the 'Ids' input tensor (the lookup indices).
    return "int64"

Expand Down Expand Up @@ -297,6 +303,53 @@ def test_param_dtype():
)


class TestEmbeddingFP16OP(TestLookupTableOp):
    """Half-precision variant of the lookup_table_v2 (embedding) op test.

    Uses an even-element table shape (18, 32); per the review discussion the
    low-precision kernel has a known issue with odd element counts, so the
    FP16 case deliberately avoids them.
    """

    def setUp(self):
        self.op_type = "lookup_table_v2"
        self.python_api = paddle.nn.functional.embedding
        self.init_dtype()

        num_rows, num_cols, num_queries = 18, 32, 4
        weight = np.random.random((num_rows, num_cols)).astype(self.dtype)
        lookup_ids = np.random.randint(0, num_rows, num_queries).astype(
            self.id_dtype()
        )

        self.inputs = {'W': weight, 'Ids': lookup_ids}
        self.outputs = {'Out': weight[lookup_ids]}

    def init_dtype(self):
        # Override the base class's float64 with half precision.
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or does not support bfloat16",
)
class TestEmbeddingBF16OP(OpTest):
    """BF16 test for the lookup_table_v2 (embedding) op.

    BF16 tensors are represented as uint16 on the Python side, so the table
    and the expected output are generated in float32 and converted with
    convert_float_to_uint16. Runs only on CUDA devices with bfloat16 support.
    """

    def setUp(self):
        self.op_type = "lookup_table_v2"
        self.python_api = paddle.nn.functional.embedding
        # uint16 is the host-side storage format for bfloat16 values.
        self.dtype = np.uint16

        # Even-element shape (18, 32), matching the FP16 case above.
        table = np.random.random((18, 32)).astype("float32")
        ids = np.random.randint(0, 18, 4).astype(self.id_dtype())

        self.inputs = {'W': convert_float_to_uint16(table), 'Ids': ids}
        self.outputs = {'Out': convert_float_to_uint16(table[ids])}

    def id_dtype(self):
        # Dtype used for the 'Ids' input tensor (the lookup indices).
        return "int64"

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_cinn=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        # Fix: set('Ids') built the character set {'I', 'd', 's'}, which never
        # matched the input name; {'Ids'} correctly excludes the integer
        # index input from gradient checking.
        self.check_grad_with_place(
            place, ['W'], 'Out', no_grad_set={'Ids'}, check_cinn=True
        )


if __name__ == "__main__":
    # The OpTest framework runs these cases in static-graph mode.
    paddle.enable_static()
    unittest.main()