Skip to content

Overview

Reading Order Module.

Provides predictors for determining the logical reading sequence of document elements based on layout detection and spatial analysis.

Available Predictors
  • RuleBasedReadingOrderPredictor: Rule-based predictor using R-tree indexing
Example
from omnidocs.tasks.reading_order import RuleBasedReadingOrderPredictor
from omnidocs.tasks.layout_extraction import DocLayoutYOLO, DocLayoutYOLOConfig
from omnidocs.tasks.ocr_extraction import EasyOCR, EasyOCRConfig

# Initialize components
layout_extractor = DocLayoutYOLO(config=DocLayoutYOLOConfig())
ocr = EasyOCR(config=EasyOCRConfig())
predictor = RuleBasedReadingOrderPredictor()

# Process document
layout = layout_extractor.extract(image)
ocr_result = ocr.extract(image)
reading_order = predictor.predict(layout, ocr_result)

# Get text in reading order
text = reading_order.get_full_text()

# Get elements by type
tables = reading_order.get_elements_by_type(ElementType.TABLE)

# Get caption associations
for elem in reading_order.ordered_elements:
    if elem.element_type == ElementType.FIGURE:
        captions = reading_order.get_captions_for(elem.original_id)
        print(f"Figure {elem.original_id} captions: {[c.text for c in captions]}")

BaseReadingOrderPredictor

Bases: ABC

Abstract base class for reading order predictors.

Reading order predictors take layout detection and OCR results and produce a properly ordered sequence of document elements.

Example
predictor = RuleBasedReadingOrderPredictor()

# Get layout and OCR
layout = layout_extractor.extract(image)
ocr = ocr_extractor.extract(image)

# Predict reading order
result = predictor.predict(layout, ocr)

# Or with multiple pages
results = predictor.predict_multi_page(layouts, ocrs)

predict abstractmethod

predict(
    layout: LayoutOutput,
    ocr: Optional[OCROutput] = None,
    page_no: int = 0,
) -> ReadingOrderOutput

Predict reading order for a single page.

PARAMETER DESCRIPTION
layout

Layout detection results with bounding boxes

TYPE: LayoutOutput

ocr

Optional OCR results. If provided, text will be matched to layout elements by bbox overlap.

TYPE: Optional[OCROutput] DEFAULT: None

page_no

Page number (for multi-page documents)

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION
ReadingOrderOutput

ReadingOrderOutput with ordered elements and associations

Example
layout = layout_extractor.extract(page_image)
ocr = ocr_extractor.extract(page_image)
order = predictor.predict(layout, ocr, page_no=0)
Source code in omnidocs/tasks/reading_order/base.py
@abstractmethod
def predict(
    self,
    layout: "LayoutOutput",
    ocr: Optional["OCROutput"] = None,
    page_no: int = 0,
) -> ReadingOrderOutput:
    """
    Predict reading order for a single page.

    Args:
        layout: Layout detection output containing bounding boxes.
        ocr: Optional OCR output; when supplied, its text is matched
            to layout elements via bounding-box overlap.
        page_no: Zero-based page number within a multi-page document.

    Returns:
        ReadingOrderOutput holding the ordered elements together with
        their caption/footnote associations.

    Example:
        ```python
        layout = layout_extractor.extract(page_image)
        ocr = ocr_extractor.extract(page_image)
        order = predictor.predict(layout, ocr, page_no=0)
        ```
    """
    ...

predict_multi_page

predict_multi_page(
    layouts: List[LayoutOutput],
    ocrs: Optional[List[OCROutput]] = None,
) -> List[ReadingOrderOutput]

Predict reading order for multiple pages.

PARAMETER DESCRIPTION
layouts

List of layout results, one per page

TYPE: List[LayoutOutput]

ocrs

Optional list of OCR results, one per page

TYPE: Optional[List[OCROutput]] DEFAULT: None

RETURNS DESCRIPTION
List[ReadingOrderOutput]

List of ReadingOrderOutput, one per page

Source code in omnidocs/tasks/reading_order/base.py
def predict_multi_page(
    self,
    layouts: List["LayoutOutput"],
    ocrs: Optional[List["OCROutput"]] = None,
) -> List["ReadingOrderOutput"]:
    """
    Predict reading order for multiple pages.

    Args:
        layouts: List of layout results, one per page
        ocrs: Optional list of OCR results, one per page. If shorter
            than ``layouts``, the remaining pages are processed
            without OCR text instead of raising.

    Returns:
        List of ReadingOrderOutput, one per page
    """
    results = []

    for i, layout in enumerate(layouts):
        # Guard the index so a partial OCR run (fewer entries than
        # pages) degrades to "no text" rather than an IndexError.
        ocr = ocrs[i] if ocrs is not None and i < len(ocrs) else None
        results.append(self.predict(layout, ocr, page_no=i))

    return results

BoundingBox

Bases: BaseModel

Bounding box in pixel coordinates.

width property

width: float

Width of the bounding box.

height property

height: float

Height of the bounding box.

center property

center: Tuple[float, float]

Center point of the bounding box.

to_list

to_list() -> List[float]

Convert to [x1, y1, x2, y2] list.

Source code in omnidocs/tasks/reading_order/models.py
def to_list(self) -> List[float]:
    """Return this box as an [x1, y1, x2, y2] list."""
    coords = (self.x1, self.y1, self.x2, self.y2)
    return list(coords)

from_list classmethod

from_list(coords: List[float]) -> BoundingBox

Create from [x1, y1, x2, y2] list.

Source code in omnidocs/tasks/reading_order/models.py
@classmethod
def from_list(cls, coords: List[float]) -> "BoundingBox":
    """Build a BoundingBox from an [x1, y1, x2, y2] list."""
    if len(coords) != 4:
        raise ValueError(f"Expected 4 coordinates, got {len(coords)}")
    x1, y1, x2, y2 = coords
    return cls(x1=x1, y1=y1, x2=x2, y2=y2)

to_normalized

to_normalized(
    image_width: int, image_height: int
) -> BoundingBox

Convert to normalized coordinates (0-1024 range).

PARAMETER DESCRIPTION
image_width

Original image width in pixels

TYPE: int

image_height

Original image height in pixels

TYPE: int

RETURNS DESCRIPTION
BoundingBox

New BoundingBox with coordinates in 0-1024 range

Source code in omnidocs/tasks/reading_order/models.py
def to_normalized(self, image_width: int, image_height: int) -> "BoundingBox":
    """
    Convert to normalized coordinates (0-1024 range).

    Args:
        image_width: Original image width in pixels
        image_height: Original image height in pixels

    Returns:
        New BoundingBox with coordinates in 0-1024 range

    Raises:
        ValueError: If either dimension is zero or negative.
    """
    # Fail with a clear message instead of an opaque ZeroDivisionError
    # (or silently sign-flipped coordinates) on invalid dimensions.
    if image_width <= 0 or image_height <= 0:
        raise ValueError(
            f"Image dimensions must be positive, got {image_width}x{image_height}"
        )
    return BoundingBox(
        x1=self.x1 / image_width * NORMALIZED_SIZE,
        y1=self.y1 / image_height * NORMALIZED_SIZE,
        x2=self.x2 / image_width * NORMALIZED_SIZE,
        y2=self.y2 / image_height * NORMALIZED_SIZE,
    )

ElementType

Bases: str, Enum

Type of document element for reading order.

OrderedElement

Bases: BaseModel

A document element with its reading order position.

Combines layout detection results with OCR text and assigns a reading order index.

to_dict

to_dict() -> Dict

Convert to dictionary representation.

Source code in omnidocs/tasks/reading_order/models.py
def to_dict(self) -> Dict:
    """Serialize this ordered element to a plain dictionary."""
    payload = {
        "index": self.index,
        "element_type": self.element_type.value,
        "bbox": self.bbox.to_list(),
    }
    payload["text"] = self.text
    payload["confidence"] = self.confidence
    payload["page_no"] = self.page_no
    payload["original_id"] = self.original_id
    return payload

ReadingOrderOutput

Bases: BaseModel

Complete reading order prediction result.

Provides:

- Ordered list of document elements
- Caption-to-element associations
- Footnote-to-element associations
- Merge suggestions for split elements

Example
result = predictor.predict(layout, ocr)

# Get full text in reading order
full_text = result.get_full_text()

# Get elements by type
tables = result.get_elements_by_type(ElementType.TABLE)

# Find caption for a figure
captions = result.get_captions_for(figure_element.original_id)

element_count property

element_count: int

Total number of ordered elements.

get_full_text

get_full_text(separator: str = '\n\n') -> str

Get concatenated text in reading order.

Excludes page headers, footers, captions, and footnotes from main text flow.

Source code in omnidocs/tasks/reading_order/models.py
def get_full_text(self, separator: str = "\n\n") -> str:
    """
    Get concatenated text in reading order.

    Page headers, page footers, captions, and footnotes are excluded
    from the main text flow.
    """
    excluded = {
        ElementType.PAGE_HEADER,
        ElementType.PAGE_FOOTER,
        ElementType.CAPTION,
        ElementType.FOOTNOTE,
    }
    # Keep only body elements that actually carry text.
    parts = [
        element.text
        for element in self.ordered_elements
        if element.element_type not in excluded and element.text
    ]
    return separator.join(parts)

get_elements_by_type

get_elements_by_type(
    element_type: ElementType,
) -> List[OrderedElement]

Filter elements by type.

Source code in omnidocs/tasks/reading_order/models.py
def get_elements_by_type(self, element_type: ElementType) -> List[OrderedElement]:
    """Return every ordered element whose type matches ``element_type``."""
    matches = []
    for element in self.ordered_elements:
        if element.element_type == element_type:
            matches.append(element)
    return matches

get_captions_for

get_captions_for(element_id: int) -> List[OrderedElement]

Get caption elements for a given element ID.

Source code in omnidocs/tasks/reading_order/models.py
def get_captions_for(self, element_id: int) -> List[OrderedElement]:
    """Get caption elements for a given element ID."""
    linked = set(self.caption_map.get(element_id, []))
    return [elem for elem in self.ordered_elements if elem.original_id in linked]

get_footnotes_for

get_footnotes_for(element_id: int) -> List[OrderedElement]

Get footnote elements for a given element ID.

Source code in omnidocs/tasks/reading_order/models.py
def get_footnotes_for(self, element_id: int) -> List[OrderedElement]:
    """Get footnote elements for a given element ID."""
    linked = set(self.footnote_map.get(element_id, []))
    return [elem for elem in self.ordered_elements if elem.original_id in linked]

to_dict

to_dict() -> Dict

Convert to dictionary representation.

Source code in omnidocs/tasks/reading_order/models.py
def to_dict(self) -> Dict:
    """Serialize the full prediction result, including per-element dicts."""
    element_dicts = []
    for element in self.ordered_elements:
        element_dicts.append(element.to_dict())
    return {
        "ordered_elements": element_dicts,
        "caption_map": self.caption_map,
        "footnote_map": self.footnote_map,
        "merge_map": self.merge_map,
        "image_width": self.image_width,
        "image_height": self.image_height,
        "element_count": self.element_count,
    }

save_json

save_json(file_path: Union[str, Path]) -> None

Save to JSON file.

Source code in omnidocs/tasks/reading_order/models.py
def save_json(self, file_path: Union[str, Path]) -> None:
    """Write this result as pretty-printed JSON to ``file_path``."""
    target = Path(file_path)
    # Create any missing parent directories before writing.
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = self.model_dump_json(indent=2)
    target.write_text(payload, encoding="utf-8")

load_json classmethod

load_json(
    file_path: Union[str, Path],
) -> ReadingOrderOutput

Load from JSON file.

Source code in omnidocs/tasks/reading_order/models.py
@classmethod
def load_json(cls, file_path: Union[str, Path]) -> "ReadingOrderOutput":
    """Read a JSON file and validate it into a ReadingOrderOutput."""
    raw = Path(file_path).read_text(encoding="utf-8")
    return cls.model_validate_json(raw)

RuleBasedReadingOrderPredictor

RuleBasedReadingOrderPredictor()

Bases: BaseReadingOrderPredictor

Rule-based reading order predictor using spatial analysis.

Uses R-tree spatial indexing and rule-based algorithms to determine the logical reading sequence of document elements. This is a CPU-only implementation that doesn't require GPU resources.

Features:

- Multi-column layout detection
- Header/footer separation
- Caption-to-figure/table association
- Footnote linking
- Element merge suggestions

Example
from omnidocs.tasks.reading_order import RuleBasedReadingOrderPredictor
from omnidocs.tasks.layout_extraction import DocLayoutYOLO, DocLayoutYOLOConfig
from omnidocs.tasks.ocr_extraction import EasyOCR, EasyOCRConfig

# Initialize components
layout_extractor = DocLayoutYOLO(config=DocLayoutYOLOConfig())
ocr = EasyOCR(config=EasyOCRConfig())
predictor = RuleBasedReadingOrderPredictor()

# Process document
layout = layout_extractor.extract(image)
ocr_result = ocr.extract(image)
reading_order = predictor.predict(layout, ocr_result)

# Get text in reading order
text = reading_order.get_full_text()

Initialize the reading order predictor.

Source code in omnidocs/tasks/reading_order/rule_based/predictor.py
def __init__(self):
    """Initialize the reading order predictor."""
    # Horizontal dilation is applied only to elements narrower than this
    # fraction of the normalized page width.
    self._horizontal_dilation_threshold_norm = 0.15
    # Dilate page elements before spatial queries.
    self.dilated_page_element = True

predict

predict(
    layout: LayoutOutput,
    ocr: Optional[OCROutput] = None,
    page_no: int = 0,
) -> ReadingOrderOutput

Predict reading order for a single page.

PARAMETER DESCRIPTION
layout

Layout detection results with bounding boxes

TYPE: LayoutOutput

ocr

Optional OCR results for text content

TYPE: Optional[OCROutput] DEFAULT: None

page_no

Page number (for multi-page documents)

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION
ReadingOrderOutput

ReadingOrderOutput with ordered elements and associations

Source code in omnidocs/tasks/reading_order/rule_based/predictor.py
def predict(
    self,
    layout: "LayoutOutput",
    ocr: Optional["OCROutput"] = None,
    page_no: int = 0,
) -> ReadingOrderOutput:
    """
    Predict reading order for a single page.

    Pipeline: match OCR text to layout boxes, convert boxes into the
    algorithm's bottom-left-origin coordinate system, run the
    rule-based ordering, derive caption/footnote/merge associations,
    then convert the sorted elements back to top-left-origin output.

    Args:
        layout: Layout detection results with bounding boxes
        ocr: Optional OCR results for text content
        page_no: Page number (for multi-page documents)

    Returns:
        ReadingOrderOutput with ordered elements and associations
    """
    page_width = layout.image_width
    page_height = layout.image_height

    # Build text map from OCR if available (layout-box index -> text).
    text_map: Dict[int, str] = {}
    if ocr:
        text_map = self._build_text_map(layout, ocr)

    # Convert layout boxes to internal PageElements
    page_elements: List[_PageElement] = []
    for i, box in enumerate(layout.bboxes):
        label_str = box.label.value.lower()
        # Unmapped labels fall back to ElementType.OTHER.
        element_type = LABEL_TO_ELEMENT_TYPE.get(label_str, ElementType.OTHER)

        # Convert from top-left origin to bottom-left origin
        # (the ordering algorithm expects bottom-left coordinates).
        elem = _PageElement(
            cid=i,
            text=text_map.get(i, ""),
            page_no=page_no,
            page_width=page_width,
            page_height=page_height,
            label=element_type,
            left=box.bbox.x1,
            bottom=page_height - box.bbox.y2,  # Convert y2 to bottom
            right=box.bbox.x2,
            top=page_height - box.bbox.y1,  # Convert y1 to top
        )
        page_elements.append(elem)

    # Run reading order prediction
    sorted_elements = self._predict_reading_order(page_elements)

    # Get caption associations
    caption_map = self._find_to_captions(sorted_elements)

    # Get footnote associations
    footnote_map = self._find_to_footnotes(sorted_elements)

    # Get merge suggestions
    merge_map = self._predict_merges(sorted_elements)

    # Convert to OrderedElements
    ordered_elements: List[OrderedElement] = []
    for idx, elem in enumerate(sorted_elements):
        # Convert back from bottom-left to top-left origin
        bbox = BoundingBox(
            x1=elem.left,
            y1=page_height - elem.top,
            x2=elem.right,
            y2=page_height - elem.bottom,
        )

        # `cid` indexes the original layout boxes; default to full
        # confidence if it falls outside that range.
        confidence = 1.0
        if elem.cid < len(layout.bboxes):
            confidence = layout.bboxes[elem.cid].confidence

        ordered_elem = OrderedElement(
            index=idx,
            element_type=elem.label,
            bbox=bbox,
            text=elem.text,
            confidence=confidence,
            page_no=page_no,
            original_id=elem.cid,
        )
        ordered_elements.append(ordered_elem)

    return ReadingOrderOutput(
        ordered_elements=ordered_elements,
        caption_map=caption_map,
        footnote_map=footnote_map,
        merge_map=merge_map,
        image_width=page_width,
        image_height=page_height,
        model_name="RuleBasedReadingOrderPredictor",
    )

base

Base class for reading order predictors.

Defines the abstract interface that all reading order predictors must implement.

BaseReadingOrderPredictor

Bases: ABC

Abstract base class for reading order predictors.

Reading order predictors take layout detection and OCR results and produce a properly ordered sequence of document elements.

Example
predictor = RuleBasedReadingOrderPredictor()

# Get layout and OCR
layout = layout_extractor.extract(image)
ocr = ocr_extractor.extract(image)

# Predict reading order
result = predictor.predict(layout, ocr)

# Or with multiple pages
results = predictor.predict_multi_page(layouts, ocrs)

predict abstractmethod

predict(
    layout: LayoutOutput,
    ocr: Optional[OCROutput] = None,
    page_no: int = 0,
) -> ReadingOrderOutput

Predict reading order for a single page.

PARAMETER DESCRIPTION
layout

Layout detection results with bounding boxes

TYPE: LayoutOutput

ocr

Optional OCR results. If provided, text will be matched to layout elements by bbox overlap.

TYPE: Optional[OCROutput] DEFAULT: None

page_no

Page number (for multi-page documents)

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION
ReadingOrderOutput

ReadingOrderOutput with ordered elements and associations

Example
layout = layout_extractor.extract(page_image)
ocr = ocr_extractor.extract(page_image)
order = predictor.predict(layout, ocr, page_no=0)
Source code in omnidocs/tasks/reading_order/base.py
@abstractmethod
def predict(
    self,
    layout: "LayoutOutput",
    ocr: Optional["OCROutput"] = None,
    page_no: int = 0,
) -> ReadingOrderOutput:
    """
    Predict reading order for a single page.

    Args:
        layout: Layout detection output containing bounding boxes.
        ocr: Optional OCR output; when supplied, its text is matched
            to layout elements via bounding-box overlap.
        page_no: Zero-based page number within a multi-page document.

    Returns:
        ReadingOrderOutput holding the ordered elements together with
        their caption/footnote associations.

    Example:
        ```python
        layout = layout_extractor.extract(page_image)
        ocr = ocr_extractor.extract(page_image)
        order = predictor.predict(layout, ocr, page_no=0)
        ```
    """
    ...

predict_multi_page

predict_multi_page(
    layouts: List[LayoutOutput],
    ocrs: Optional[List[OCROutput]] = None,
) -> List[ReadingOrderOutput]

Predict reading order for multiple pages.

PARAMETER DESCRIPTION
layouts

List of layout results, one per page

TYPE: List[LayoutOutput]

ocrs

Optional list of OCR results, one per page

TYPE: Optional[List[OCROutput]] DEFAULT: None

RETURNS DESCRIPTION
List[ReadingOrderOutput]

List of ReadingOrderOutput, one per page

Source code in omnidocs/tasks/reading_order/base.py
def predict_multi_page(
    self,
    layouts: List["LayoutOutput"],
    ocrs: Optional[List["OCROutput"]] = None,
) -> List["ReadingOrderOutput"]:
    """
    Predict reading order for multiple pages.

    Args:
        layouts: List of layout results, one per page
        ocrs: Optional list of OCR results, one per page. If shorter
            than ``layouts``, the remaining pages are processed
            without OCR text instead of raising.

    Returns:
        List of ReadingOrderOutput, one per page
    """
    results = []

    for i, layout in enumerate(layouts):
        # Guard the index so a partial OCR run (fewer entries than
        # pages) degrades to "no text" rather than an IndexError.
        ocr = ocrs[i] if ocrs is not None and i < len(ocrs) else None
        results.append(self.predict(layout, ocr, page_no=i))

    return results

models

Pydantic models for reading order prediction.

Takes layout detection and OCR results, produces ordered element sequence with caption and footnote associations.

Example
# Get layout and OCR
layout = layout_extractor.extract(image)
ocr = ocr_extractor.extract(image)

# Predict reading order
reading_order = predictor.predict(layout, ocr)

# Iterate in reading order
for element in reading_order.ordered_elements:
    print(f"{element.index}: [{element.element_type}] {element.text[:50]}...")

# Get caption associations
for fig_id, caption_ids in reading_order.caption_map.items():
    print(f"Figure {fig_id} has captions: {caption_ids}")

ElementType

Bases: str, Enum

Type of document element for reading order.

BoundingBox

Bases: BaseModel

Bounding box in pixel coordinates.

width property

width: float

Width of the bounding box.

height property

height: float

Height of the bounding box.

center property

center: Tuple[float, float]

Center point of the bounding box.

to_list

to_list() -> List[float]

Convert to [x1, y1, x2, y2] list.

Source code in omnidocs/tasks/reading_order/models.py
def to_list(self) -> List[float]:
    """Return this box as an [x1, y1, x2, y2] list."""
    coords = (self.x1, self.y1, self.x2, self.y2)
    return list(coords)

from_list classmethod

from_list(coords: List[float]) -> BoundingBox

Create from [x1, y1, x2, y2] list.

Source code in omnidocs/tasks/reading_order/models.py
@classmethod
def from_list(cls, coords: List[float]) -> "BoundingBox":
    """Build a BoundingBox from an [x1, y1, x2, y2] list."""
    if len(coords) != 4:
        raise ValueError(f"Expected 4 coordinates, got {len(coords)}")
    x1, y1, x2, y2 = coords
    return cls(x1=x1, y1=y1, x2=x2, y2=y2)

to_normalized

to_normalized(
    image_width: int, image_height: int
) -> BoundingBox

Convert to normalized coordinates (0-1024 range).

PARAMETER DESCRIPTION
image_width

Original image width in pixels

TYPE: int

image_height

Original image height in pixels

TYPE: int

RETURNS DESCRIPTION
BoundingBox

New BoundingBox with coordinates in 0-1024 range

Source code in omnidocs/tasks/reading_order/models.py
def to_normalized(self, image_width: int, image_height: int) -> "BoundingBox":
    """
    Convert to normalized coordinates (0-1024 range).

    Args:
        image_width: Original image width in pixels
        image_height: Original image height in pixels

    Returns:
        New BoundingBox with coordinates in 0-1024 range

    Raises:
        ValueError: If either dimension is zero or negative.
    """
    # Fail with a clear message instead of an opaque ZeroDivisionError
    # (or silently sign-flipped coordinates) on invalid dimensions.
    if image_width <= 0 or image_height <= 0:
        raise ValueError(
            f"Image dimensions must be positive, got {image_width}x{image_height}"
        )
    return BoundingBox(
        x1=self.x1 / image_width * NORMALIZED_SIZE,
        y1=self.y1 / image_height * NORMALIZED_SIZE,
        x2=self.x2 / image_width * NORMALIZED_SIZE,
        y2=self.y2 / image_height * NORMALIZED_SIZE,
    )

OrderedElement

Bases: BaseModel

A document element with its reading order position.

Combines layout detection results with OCR text and assigns a reading order index.

to_dict

to_dict() -> Dict

Convert to dictionary representation.

Source code in omnidocs/tasks/reading_order/models.py
def to_dict(self) -> Dict:
    """Serialize this ordered element to a plain dictionary."""
    payload = {
        "index": self.index,
        "element_type": self.element_type.value,
        "bbox": self.bbox.to_list(),
    }
    payload["text"] = self.text
    payload["confidence"] = self.confidence
    payload["page_no"] = self.page_no
    payload["original_id"] = self.original_id
    return payload

ReadingOrderOutput

Bases: BaseModel

Complete reading order prediction result.

Provides:

- Ordered list of document elements
- Caption-to-element associations
- Footnote-to-element associations
- Merge suggestions for split elements

Example
result = predictor.predict(layout, ocr)

# Get full text in reading order
full_text = result.get_full_text()

# Get elements by type
tables = result.get_elements_by_type(ElementType.TABLE)

# Find caption for a figure
captions = result.get_captions_for(figure_element.original_id)

element_count property

element_count: int

Total number of ordered elements.

get_full_text

get_full_text(separator: str = '\n\n') -> str

Get concatenated text in reading order.

Excludes page headers, footers, captions, and footnotes from main text flow.

Source code in omnidocs/tasks/reading_order/models.py
def get_full_text(self, separator: str = "\n\n") -> str:
    """
    Get concatenated text in reading order.

    Page headers, page footers, captions, and footnotes are excluded
    from the main text flow.
    """
    excluded = {
        ElementType.PAGE_HEADER,
        ElementType.PAGE_FOOTER,
        ElementType.CAPTION,
        ElementType.FOOTNOTE,
    }
    # Keep only body elements that actually carry text.
    parts = [
        element.text
        for element in self.ordered_elements
        if element.element_type not in excluded and element.text
    ]
    return separator.join(parts)

get_elements_by_type

get_elements_by_type(
    element_type: ElementType,
) -> List[OrderedElement]

Filter elements by type.

Source code in omnidocs/tasks/reading_order/models.py
def get_elements_by_type(self, element_type: ElementType) -> List[OrderedElement]:
    """Return every ordered element whose type matches ``element_type``."""
    matches = []
    for element in self.ordered_elements:
        if element.element_type == element_type:
            matches.append(element)
    return matches

get_captions_for

get_captions_for(element_id: int) -> List[OrderedElement]

Get caption elements for a given element ID.

Source code in omnidocs/tasks/reading_order/models.py
def get_captions_for(self, element_id: int) -> List[OrderedElement]:
    """Get caption elements for a given element ID."""
    linked = set(self.caption_map.get(element_id, []))
    return [elem for elem in self.ordered_elements if elem.original_id in linked]

get_footnotes_for

get_footnotes_for(element_id: int) -> List[OrderedElement]

Get footnote elements for a given element ID.

Source code in omnidocs/tasks/reading_order/models.py
def get_footnotes_for(self, element_id: int) -> List[OrderedElement]:
    """Get footnote elements for a given element ID."""
    linked = set(self.footnote_map.get(element_id, []))
    return [elem for elem in self.ordered_elements if elem.original_id in linked]

to_dict

to_dict() -> Dict

Convert to dictionary representation.

Source code in omnidocs/tasks/reading_order/models.py
def to_dict(self) -> Dict:
    """Serialize the full prediction result, including per-element dicts."""
    element_dicts = []
    for element in self.ordered_elements:
        element_dicts.append(element.to_dict())
    return {
        "ordered_elements": element_dicts,
        "caption_map": self.caption_map,
        "footnote_map": self.footnote_map,
        "merge_map": self.merge_map,
        "image_width": self.image_width,
        "image_height": self.image_height,
        "element_count": self.element_count,
    }

save_json

save_json(file_path: Union[str, Path]) -> None

Save to JSON file.

Source code in omnidocs/tasks/reading_order/models.py
def save_json(self, file_path: Union[str, Path]) -> None:
    """Write this result as pretty-printed JSON to ``file_path``."""
    target = Path(file_path)
    # Create any missing parent directories before writing.
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = self.model_dump_json(indent=2)
    target.write_text(payload, encoding="utf-8")

load_json classmethod

load_json(
    file_path: Union[str, Path],
) -> ReadingOrderOutput

Load from JSON file.

Source code in omnidocs/tasks/reading_order/models.py
@classmethod
def load_json(cls, file_path: Union[str, Path]) -> "ReadingOrderOutput":
    """Read a JSON file and validate it into a ReadingOrderOutput."""
    raw = Path(file_path).read_text(encoding="utf-8")
    return cls.model_validate_json(raw)

rule_based

Rule-based reading order predictor module.

Provides rule-based reading order prediction using spatial analysis.

RuleBasedReadingOrderPredictor

RuleBasedReadingOrderPredictor()

Bases: BaseReadingOrderPredictor

Rule-based reading order predictor using spatial analysis.

Uses R-tree spatial indexing and rule-based algorithms to determine the logical reading sequence of document elements. This is a CPU-only implementation that doesn't require GPU resources.

Features:

- Multi-column layout detection
- Header/footer separation
- Caption-to-figure/table association
- Footnote linking
- Element merge suggestions

Example
from omnidocs.tasks.reading_order import RuleBasedReadingOrderPredictor
from omnidocs.tasks.layout_extraction import DocLayoutYOLO, DocLayoutYOLOConfig
from omnidocs.tasks.ocr_extraction import EasyOCR, EasyOCRConfig

# Initialize components
layout_extractor = DocLayoutYOLO(config=DocLayoutYOLOConfig())
ocr = EasyOCR(config=EasyOCRConfig())
predictor = RuleBasedReadingOrderPredictor()

# Process document
layout = layout_extractor.extract(image)
ocr_result = ocr.extract(image)
reading_order = predictor.predict(layout, ocr_result)

# Get text in reading order
text = reading_order.get_full_text()

Initialize the reading order predictor.

Source code in omnidocs/tasks/reading_order/rule_based/predictor.py
def __init__(self):
    """Initialize the reading order predictor."""
    # Horizontal dilation is applied only to elements narrower than this
    # fraction of the normalized page width.
    self._horizontal_dilation_threshold_norm = 0.15
    # Dilate page elements before spatial queries.
    self.dilated_page_element = True

predict

predict(
    layout: LayoutOutput,
    ocr: Optional[OCROutput] = None,
    page_no: int = 0,
) -> ReadingOrderOutput

Predict reading order for a single page.

PARAMETER DESCRIPTION
layout

Layout detection results with bounding boxes

TYPE: LayoutOutput

ocr

Optional OCR results for text content

TYPE: Optional[OCROutput] DEFAULT: None

page_no

Page number (for multi-page documents)

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION
ReadingOrderOutput

ReadingOrderOutput with ordered elements and associations

Source code in omnidocs/tasks/reading_order/rule_based/predictor.py
def predict(
    self,
    layout: "LayoutOutput",
    ocr: Optional["OCROutput"] = None,
    page_no: int = 0,
) -> ReadingOrderOutput:
    """
    Predict reading order for a single page.

    Pipeline: match OCR text to layout boxes, convert boxes into the
    algorithm's bottom-left-origin coordinate system, run the
    rule-based ordering, derive caption/footnote/merge associations,
    then convert the sorted elements back to top-left-origin output.

    Args:
        layout: Layout detection results with bounding boxes
        ocr: Optional OCR results for text content
        page_no: Page number (for multi-page documents)

    Returns:
        ReadingOrderOutput with ordered elements and associations
    """
    page_width = layout.image_width
    page_height = layout.image_height

    # Build text map from OCR if available (layout-box index -> text).
    text_map: Dict[int, str] = {}
    if ocr:
        text_map = self._build_text_map(layout, ocr)

    # Convert layout boxes to internal PageElements
    page_elements: List[_PageElement] = []
    for i, box in enumerate(layout.bboxes):
        label_str = box.label.value.lower()
        # Unmapped labels fall back to ElementType.OTHER.
        element_type = LABEL_TO_ELEMENT_TYPE.get(label_str, ElementType.OTHER)

        # Convert from top-left origin to bottom-left origin
        # (the ordering algorithm expects bottom-left coordinates).
        elem = _PageElement(
            cid=i,
            text=text_map.get(i, ""),
            page_no=page_no,
            page_width=page_width,
            page_height=page_height,
            label=element_type,
            left=box.bbox.x1,
            bottom=page_height - box.bbox.y2,  # Convert y2 to bottom
            right=box.bbox.x2,
            top=page_height - box.bbox.y1,  # Convert y1 to top
        )
        page_elements.append(elem)

    # Run reading order prediction
    sorted_elements = self._predict_reading_order(page_elements)

    # Get caption associations
    caption_map = self._find_to_captions(sorted_elements)

    # Get footnote associations
    footnote_map = self._find_to_footnotes(sorted_elements)

    # Get merge suggestions
    merge_map = self._predict_merges(sorted_elements)

    # Convert to OrderedElements
    ordered_elements: List[OrderedElement] = []
    for idx, elem in enumerate(sorted_elements):
        # Convert back from bottom-left to top-left origin
        bbox = BoundingBox(
            x1=elem.left,
            y1=page_height - elem.top,
            x2=elem.right,
            y2=page_height - elem.bottom,
        )

        # `cid` indexes the original layout boxes; default to full
        # confidence if it falls outside that range.
        confidence = 1.0
        if elem.cid < len(layout.bboxes):
            confidence = layout.bboxes[elem.cid].confidence

        ordered_elem = OrderedElement(
            index=idx,
            element_type=elem.label,
            bbox=bbox,
            text=elem.text,
            confidence=confidence,
            page_no=page_no,
            original_id=elem.cid,
        )
        ordered_elements.append(ordered_elem)

    return ReadingOrderOutput(
        ordered_elements=ordered_elements,
        caption_map=caption_map,
        footnote_map=footnote_map,
        merge_map=merge_map,
        image_width=page_width,
        image_height=page_height,
        model_name="RuleBasedReadingOrderPredictor",
    )

predictor

Rule-based reading order predictor.

Uses spatial analysis and R-tree indexing to determine the logical reading sequence of document elements. Self-contained implementation without external dependencies on docling-ibm-models.

Based on the algorithm from docling-ibm-models, adapted for omnidocs.

RuleBasedReadingOrderPredictor

RuleBasedReadingOrderPredictor()

Bases: BaseReadingOrderPredictor

Rule-based reading order predictor using spatial analysis.

Uses R-tree spatial indexing and rule-based algorithms to determine the logical reading sequence of document elements. This is a CPU-only implementation that doesn't require GPU resources.

Features:

- Multi-column layout detection
- Header/footer separation
- Caption-to-figure/table association
- Footnote linking
- Element merge suggestions

Example
from omnidocs.tasks.reading_order import RuleBasedReadingOrderPredictor
from omnidocs.tasks.layout_extraction import DocLayoutYOLO, DocLayoutYOLOConfig
from omnidocs.tasks.ocr_extraction import EasyOCR, EasyOCRConfig

# Initialize components
layout_extractor = DocLayoutYOLO(config=DocLayoutYOLOConfig())
ocr = EasyOCR(config=EasyOCRConfig())
predictor = RuleBasedReadingOrderPredictor()

# Process document
layout = layout_extractor.extract(image)
ocr_result = ocr.extract(image)
reading_order = predictor.predict(layout, ocr_result)

# Get text in reading order
text = reading_order.get_full_text()

Initialize the reading order predictor.

Source code in omnidocs/tasks/reading_order/rule_based/predictor.py
def __init__(self):
    """Set up the predictor with its default dilation configuration."""
    # Normalized (fraction-of-page-width) cutoff: horizontal dilation is
    # applied only to boxes narrower than this threshold.
    self._horizontal_dilation_threshold_norm = 0.15
    # Dilate page elements before spatial analysis.
    self.dilated_page_element = True
predict
predict(
    layout: LayoutOutput,
    ocr: Optional[OCROutput] = None,
    page_no: int = 0,
) -> ReadingOrderOutput

Predict reading order for a single page.

PARAMETER DESCRIPTION
layout

Layout detection results with bounding boxes

TYPE: LayoutOutput

ocr

Optional OCR results for text content

TYPE: Optional[OCROutput] DEFAULT: None

page_no

Page number (for multi-page documents)

TYPE: int DEFAULT: 0

RETURNS DESCRIPTION
ReadingOrderOutput

ReadingOrderOutput with ordered elements and associations

Source code in omnidocs/tasks/reading_order/rule_based/predictor.py
def predict(
    self,
    layout: "LayoutOutput",
    ocr: Optional["OCROutput"] = None,
    page_no: int = 0,
) -> ReadingOrderOutput:
    """
    Compute the reading order for one page.

    Args:
        layout: Layout detection results with bounding boxes.
        ocr: Optional OCR results supplying text for each element.
        page_no: Page number (for multi-page documents).

    Returns:
        ReadingOrderOutput with ordered elements plus caption, footnote,
        and merge associations.
    """
    width = layout.image_width
    height = layout.image_height

    # Map layout-box index -> extracted text (empty when no OCR given).
    texts: Dict[int, str] = self._build_text_map(layout, ocr) if ocr else {}

    # Build internal elements, flipping y from top-left to bottom-left origin.
    elements: List[_PageElement] = []
    for cid, det in enumerate(layout.bboxes):
        mapped_type = LABEL_TO_ELEMENT_TYPE.get(
            det.label.value.lower(), ElementType.OTHER
        )
        elements.append(
            _PageElement(
                cid=cid,
                text=texts.get(cid, ""),
                page_no=page_no,
                page_width=width,
                page_height=height,
                label=mapped_type,
                left=det.bbox.x1,
                bottom=height - det.bbox.y2,  # y2 becomes bottom
                right=det.bbox.x2,
                top=height - det.bbox.y1,  # y1 becomes top
            )
        )

    # Spatial sort, then derive the association maps (same call order as
    # the association helpers expect).
    ordered = self._predict_reading_order(elements)
    captions = self._find_to_captions(ordered)
    footnotes = self._find_to_footnotes(ordered)
    merges = self._predict_merges(ordered)

    # Emit public elements, flipping y back to a top-left origin.
    final: List[OrderedElement] = []
    for rank, item in enumerate(ordered):
        box = BoundingBox(
            x1=item.left,
            y1=height - item.top,
            x2=item.right,
            y2=height - item.bottom,
        )
        # Guard the lookup; fall back to full confidence otherwise.
        score = (
            layout.bboxes[item.cid].confidence
            if item.cid < len(layout.bboxes)
            else 1.0
        )
        final.append(
            OrderedElement(
                index=rank,
                element_type=item.label,
                bbox=box,
                text=item.text,
                confidence=score,
                page_no=page_no,
                original_id=item.cid,
            )
        )

    return ReadingOrderOutput(
        ordered_elements=final,
        caption_map=captions,
        footnote_map=footnotes,
        merge_map=merges,
        image_width=width,
        image_height=height,
        model_name="RuleBasedReadingOrderPredictor",
    )