diff --git a/tokenizers/src/normalizers/byte_level.rs b/tokenizers/src/normalizers/byte_level.rs
index 9d52f7eaa..edc32bfa8 100644
--- a/tokenizers/src/normalizers/byte_level.rs
+++ b/tokenizers/src/normalizers/byte_level.rs
@@ -1,6 +1,5 @@
use crate::processors::byte_level::bytes_char;
use crate::tokenizer::{NormalizedString, Normalizer, Result};
-use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use crate::utils::macro_rules_attribute;
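
For context on the test change below: `NormalizerWrapper` is a `#[serde(untagged)]` enum, so when a JSON payload matches no variant, serde returns a fixed error string instead of silently picking a partial match. A minimal sketch reproducing that behavior outside the crate, using a hypothetical one-variant stand-in for the real enum:

```rust
use serde::Deserialize;

// Hypothetical stand-in for the real NormalizerWrapper, which is an
// untagged enum over all normalizer variants.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum NormalizerWrapper {
    Lowercase { lowercase: bool },
}

fn main() {
    // A payload matching no variant yields serde's generic untagged error,
    // the exact string the updated test asserts on.
    let err = serde_json::from_str::<NormalizerWrapper>(r#"{"sep": ["", 2]}"#).unwrap_err();
    assert_eq!(
        err.to_string(),
        "data did not match any variant of untagged enum NormalizerWrapper"
    );
}
```

This is the same message the updated test in `normalizers/mod.rs` checks for; previously the processor-style payload was wrongly accepted as `NormalizerWrapper::Sequence`.
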
diff --git a/tokenizers/src/normalizers/mod.rs b/tokenizers/src/normalizers/mod.rs
index 43f50bfda..9dd549a63 100644
--- a/tokenizers/src/normalizers/mod.rs
+++ b/tokenizers/src/normalizers/mod.rs
@@ -88,12 +88,14 @@ mod tests {
         let json =
             r#"{"sep":["",2], "cls":["",0], "trim_offsets":true, "add_prefix_space":true}"#;
-        let reconstructed = serde_json::from_str::<NormalizerWrapper>(json).unwrap();
-        println!("{:?}", reconstructed);
-        assert!(matches!(
-            reconstructed,
-            NormalizerWrapper::Sequence(_)
-        ));
+        let reconstructed = serde_json::from_str::<NormalizerWrapper>(json);
+        match reconstructed {
+            Err(err) => assert_eq!(
+                err.to_string(),
+                "data did not match any variant of untagged enum NormalizerWrapper"
+            ),
+            _ => panic!("Expected an error here"),
+        }
         let json = r#"{"type":"RobertaProcessing", "sep":["",2] }"#;
         let reconstructed = serde_json::from_str::<NormalizerWrapper>(json);