【PaddlePaddle Hackathon 4】No.56 : add fp16 test and bf16 for poisson #51662
Merged
Commits (22)
d8d29f1  add fp16 and bf16 support for poisson (longranger2)
0d69755  add fp16 and bf16 support for searchsorted (longranger2)
e23441c  Merge branch 'develop' into fp16_56_3 (longranger2)
2718d8d  fix bug (longranger2)
85a4abe  Merge branch 'PaddlePaddle:develop' into fp16_56_3 (longranger2)
bf73134  Merge branch 'develop' into fp16_56_3 (longranger2)
9409af7  Update test_searchsorted_op.py (longranger2)
6c7ac27  Update test_poisson_op.py (longranger2)
2701d84  Merge branch 'PaddlePaddle:develop' into fp16_56_3 (longranger2)
8f280ee  fix bug (longranger2)
a3ac092  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (longranger2)
26e4f91  remove the searchorted (longranger2)
1c2419b  Update test_poisson_op.py (longranger2)
0525ca4  fix bug of TestPoissonBF16Op (longranger2)
748612f  Update test_poisson_op.py (longranger2)
fded554  Update test_poisson_op.py (longranger2)
327e711  Merge branch 'PaddlePaddle:develop' into fp16_56_3 (longranger2)
229cc55  Update test_poisson_op.py (longranger2)
37a69bd  fix bug of import (longranger2)
2f9ffc0  Merge branch 'PaddlePaddle:develop' into fp16_56_3 (longranger2)
1713590  Merge branch 'develop' of https://github.com/longranger2/Paddle into … (longranger2)
5bda810  fix bug (longranger2)
@@ -16,9 +16,14 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    convert_uint16_to_float,
+)

 import paddle
+from paddle.fluid import core

 paddle.enable_static()
 paddle.seed(100)
@@ -42,17 +47,20 @@ class TestPoissonOp1(OpTest):
     def setUp(self):
         self.op_type = "poisson"
         self.python_api = paddle.tensor.poisson
+        self.init_dtype()
         self.config()

         self.attrs = {}
         self.inputs = {'X': np.full([2048, 1024], self.lam, dtype=self.dtype)}
         self.outputs = {'Out': np.ones([2048, 1024], dtype=self.dtype)}

+    def init_dtype(self):
+        self.dtype = "float64"
+
     def config(self):
         self.lam = 10
         self.a = 5
         self.b = 15
-        self.dtype = "float64"

     def verify_output(self, outs):
         hist, prob = output_hist(np.array(outs[0]), self.lam, self.a, self.b)
@@ -368,5 +376,56 @@ def test_fixed_random_number(self):
         paddle.enable_static()


+class TestPoissonFP16OP(TestPoissonOp1):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not complied with CUDA and not support the bfloat16",
+)
+class TestPoissonBF16Op(OpTest):
+    def setUp(self):
+        self.op_type = "poisson"
+        self.python_api = paddle.tensor.poisson
+        self.__class__.op_type = self.op_type
+        self.config()
+        x = np.full([2048, 1024], self.lam, dtype="float32")
+        out = np.ones([2048, 1024], dtype="float32")
+        x = convert_uint16_to_float(convert_float_to_uint16(x))
+        out = convert_uint16_to_float(convert_float_to_uint16(out))
+        self.attrs = {}
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': convert_float_to_uint16(out)}
+
+    def config(self):
+        self.lam = 10
+        self.a = 5
+        self.b = 15
+        self.dtype = np.uint16
+
+    def verify_output(self, outs):
+        hist, prob = output_hist(np.array(outs[0]), self.lam, self.a, self.b)

Review comment on the line above: The reason this line is wrong is that outs[0] is the op's output; since the output is bf16, it becomes uint16 after np.array. So to get the correct result, the numpy data that was converted to uint16 has to be converted back to float32 here:
hist, prob = output_hist(convert_uint16_to_float(np.array(outs[0])), self.lam, self.a, self.b)
(A sketch of the corrected method appears after the diff.)
+        np.testing.assert_allclose(hist, prob, rtol=0.01)
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        self.check_output_with_place_customized(self.verify_output, place)
+
+    def test_check_grad(self):
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(
+            place,
+            ['X'],
+            'Out',
+            user_defined_grads=[np.zeros([2048, 1024], dtype="float32")],
+            user_defined_grad_outputs=[
+                np.random.rand(2048, 1024).astype("float32")
+            ],
+        )
+
+
 if __name__ == "__main__":
     unittest.main()
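Following the inline review comment above, a corrected `verify_output` for `TestPoissonBF16Op` would convert the op's bf16 output (which numpy exposes as uint16) back to float32 before building the histogram. This is only a sketch of the reviewer's suggestion as it would sit inside the class, not necessarily the code that was finally merged:

```python
    def verify_output(self, outs):
        # outs[0] is the bf16 op output; np.array() yields its uint16 bit
        # pattern, so convert it back to float32 before the histogram check.
        out_fp32 = convert_uint16_to_float(np.array(outs[0]))
        hist, prob = output_hist(out_fp32, self.lam, self.a, self.b)
        np.testing.assert_allclose(hist, prob, rtol=0.01)
```

For background on why a bf16 output shows up as uint16: bfloat16 keeps only the upper 16 bits of a float32 bit pattern, and the `convert_float_to_uint16` / `convert_uint16_to_float` helpers imported from `eager_op_test` pack and unpack that pattern. A minimal standalone sketch of the idea, with hypothetical helper names (the real helpers may round rather than truncate):

```python
import numpy as np

def float32_to_bf16_bits(x: np.ndarray) -> np.ndarray:
    # Keep the upper 16 bits of each float32 value and store them as uint16.
    return np.right_shift(x.astype(np.float32).view(np.uint32), 16).astype(np.uint16)

def bf16_bits_to_float32(bits: np.ndarray) -> np.ndarray:
    # Re-expand the 16-bit pattern into float32 with zeroed low mantissa bits.
    return np.left_shift(bits.astype(np.uint32), 16).view(np.float32)
```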
Review comment on TestPoissonBF16Op.setUp: Lines 397 and 398 (the convert_uint16_to_float(convert_float_to_uint16(...)) round trips) don't need to be added, because at lines 400-401 you already convert the float data to uint16. (A sketch of the simplified setUp follows below.)
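Based on that comment, a simplified `setUp` for `TestPoissonBF16Op` would drop the float-to-uint16-and-back round trip and convert the float32 reference data to the bf16 (uint16) bit pattern only once, when filling `self.inputs` and `self.outputs`. Again, this is a sketch of the reviewer's suggestion, not necessarily the final merged code:

```python
    def setUp(self):
        self.op_type = "poisson"
        self.python_api = paddle.tensor.poisson
        self.__class__.op_type = self.op_type
        self.config()
        # Build float32 reference data and convert it to the bf16 (uint16)
        # bit pattern once, as the review comment suggests.
        x = np.full([2048, 1024], self.lam, dtype="float32")
        out = np.ones([2048, 1024], dtype="float32")
        self.attrs = {}
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}
```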