remove unused import (#2679)

### What problem does this PR solve?

Removes unused imports and a few dead assignments from the `rag/app` chunking modules, as shown in the diff below.

### Type of change

- [x] Refactoring

yqkcn authored Sep 30, 2024
1 parent ae5a877 commit 570ad42

Showing 7 changed files with 10 additions and 18 deletions.
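
The PR body doesn't say how the unused imports were found. Linters such as pyflakes or Ruff (rule F401, "imported but unused") report them directly; the sketch below shows the core idea in plain Python using only the standard-library `ast` module. The file name `find_unused_imports.py` and the helper `unused_imports` are illustrative, not part of this repository.

```python
# find_unused_imports.py -- illustrative sketch, not part of this PR.
# Lists names bound by import statements that are never referenced
# elsewhere in the module. pyflakes / Ruff (F401) do this more robustly.
import ast
import sys


def unused_imports(source: str) -> list[str]:
    tree = ast.parse(source)
    imported = {}  # bound name -> line number of the import
    used = set()   # every identifier referenced anywhere in the module

    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                # "import numpy as np" binds "np"; "import a.b" binds "a"
                imported[alias.asname or alias.name.split(".")[0]] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                if alias.name == "*":  # star imports can't be tracked this way
                    continue
                imported[alias.asname or alias.name] = node.lineno
        elif isinstance(node, ast.Name):
            used.add(node.id)

    return [f"{name} (line {lineno})"
            for name, lineno in sorted(imported.items(), key=lambda kv: kv[1])
            if name not in used]


if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path, encoding="utf-8") as fp:
            for hit in unused_imports(fp.read()):
                print(f"{path}: {hit}")
```

Run against the touched files at the parent commit (ae5a877), it should flag the same names this diff removes; in CI, `ruff check --select F401` or a pyflakes pass does the same job more reliably.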

`rag/app/audio.py`: 2 changes (0 additions, 2 deletions)

```diff
@@ -10,9 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import io
 import re
-import numpy as np
 
 from api.db import LLMType
 from rag.nlp import rag_tokenizer
```

`rag/app/book.py`: 6 changes (3 additions, 3 deletions)

```diff
@@ -15,9 +15,9 @@
 from io import BytesIO
 
 from deepdoc.parser.utils import get_text
-from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
-    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
-    tokenize_chunks, find_codec
+from rag.nlp import bullets_category, is_english,remove_contents_table, \
+    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, \
+    tokenize_chunks
 from rag.nlp import rag_tokenizer
 from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
 
```

`rag/app/laws.py`: 5 changes (2 additions, 3 deletions)

```diff
@@ -10,16 +10,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import copy
 from tika import parser
 import re
 from io import BytesIO
 from docx import Document
 
 from api.db import ParserType
 from deepdoc.parser.utils import get_text
-from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
-    make_colon_as_title, add_positions, tokenize_chunks, find_codec, docx_question_level
+from rag.nlp import bullets_category, remove_contents_table, hierarchical_merge, \
+    make_colon_as_title, tokenize_chunks, docx_question_level
 from rag.nlp import rag_tokenizer
 from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
 from rag.settings import cron_logger
```

`rag/app/manual.py`: 6 changes (3 additions, 3 deletions)

```diff
@@ -19,13 +19,13 @@
 
 from api.db import ParserType
 from io import BytesIO
-from rag.nlp import rag_tokenizer, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks, docx_question_level
-from deepdoc.parser import PdfParser, PlainParser
+from rag.nlp import rag_tokenizer, tokenize, tokenize_table, bullets_category, title_frequency, tokenize_chunks, docx_question_level
 from rag.utils import num_tokens_from_string
-from deepdoc.parser import PdfParser, ExcelParser, DocxParser
+from deepdoc.parser import PdfParser, PlainParser, DocxParser
 from docx import Document
 from PIL import Image
 
+
 class Pdf(PdfParser):
     def __init__(self):
         self.model_speciess = ParserType.MANUAL.value
```

`rag/app/naive.py`: 5 changes (2 additions, 3 deletions)

```diff
@@ -25,6 +25,7 @@
 from markdown import markdown
 from docx.image.exceptions import UnrecognizedImageError
 
+
 class Docx(DocxParser):
     def __init__(self):
         pass
@@ -93,7 +94,7 @@ def __call__(self, filename, binary=None, from_page=0, to_page=100000):
 
         tbls = []
         for tb in self.doc.tables:
-            html= "<table>"
+            html = "<table>"
             for r in tb.rows:
                 html += "<tr>"
                 i = 0
@@ -146,8 +147,6 @@ def __call__(self, filename, binary=None, from_page=0,
 
 class Markdown(MarkdownParser):
     def __call__(self, filename, binary=None):
-        txt = ""
-        tbls = []
         if binary:
             encoding = find_codec(binary)
             txt = binary.decode(encoding, errors="ignore")
```

`rag/app/paper.py`: 3 changes (0 additions, 3 deletions)

```diff
@@ -12,13 +12,11 @@
 #
 import copy
 import re
-from collections import Counter
 
 from api.db import ParserType
 from rag.nlp import rag_tokenizer, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks
 from deepdoc.parser import PdfParser, PlainParser
-import numpy as np
 from rag.utils import num_tokens_from_string
 
 
 class Pdf(PdfParser):
@@ -135,7 +133,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         Only pdf is supported.
         The abstract of the paper will be sliced as an entire chunk, and will not be sliced partly.
     """
-    pdf_parser = None
     if re.search(r"\.pdf$", filename, re.IGNORECASE):
         if not kwargs.get("parser_config", {}).get("layout_recognize", True):
             pdf_parser = PlainParser()
```

`rag/app/qa.py`: 1 change (0 additions, 1 deletion)

```diff
@@ -14,7 +14,6 @@
 from copy import deepcopy
 from io import BytesIO
 from timeit import default_timer as timer
-from nltk import word_tokenize
 from openpyxl import load_workbook
 
 from deepdoc.parser.utils import get_text
```
