davanstrien HF Staff Claude Opus 4.6 committed on
Commit
9374949
·
1 Parent(s): bcd25b7

Fix config_name=None breaking push_to_hub + use cu129 nightly index

Browse files

- Replace config_name=config with conditional spread to avoid passing
None, which generates invalid YAML (config_name: null) that fails
Hub validation. Applied to all 10 scripts with --config flag.
- Switch vLLM nightly URL from /nightly to /nightly/cu129 in 6 scripts
to get x86_64 wheels (bare /nightly only has ARM wheels currently).
- Includes accumulated improvements: --verbose flag, --config/--create-pr
support, upload retry with XET fallback, inference_info fixes.
- Adds dots-ocr-1.5.py and hunyuan-ocr.py (new scripts).

Verified via HF Jobs smoke tests (7/9 config-flag scripts completed).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

deepseek-ocr-vllm.py CHANGED
@@ -11,7 +11,7 @@
11
  # ]
12
  #
13
  # [[tool.uv.index]]
14
- # url = "https://wheels.vllm.ai/nightly"
15
  #
16
  # [tool.uv]
17
  # prerelease = "allow"
@@ -215,6 +215,7 @@ def main(
215
  seed: int = 42,
216
  config: str = None,
217
  create_pr: bool = False,
 
218
  ):
219
  """Process images from HF dataset through DeepSeek-OCR model with vLLM."""
220
 
@@ -334,41 +335,40 @@ def main(
334
  # Handle inference_info tracking
335
  logger.info("Updating inference_info...")
336
 
337
- # Check for existing inference_info
338
- if "inference_info" in dataset.column_names:
339
- # Parse existing info from first row (all rows have same info)
340
- try:
341
- existing_info = json.loads(dataset[0]["inference_info"])
342
- if not isinstance(existing_info, list):
343
- existing_info = [existing_info] # Convert old format to list
344
- except (json.JSONDecodeError, TypeError):
345
- existing_info = []
346
- # Remove old column to update it
347
- dataset = dataset.remove_columns(["inference_info"])
348
- else:
349
- existing_info = []
350
-
351
- # Add new inference info
352
- new_info = {
353
- "column_name": "markdown",
354
  "model_id": model,
355
- "processing_date": datetime.now().isoformat(),
356
- "prompt": final_prompt,
 
357
  "prompt_mode": prompt_mode if prompt is None else "custom",
358
  "batch_size": batch_size,
359
  "max_tokens": max_tokens,
360
  "gpu_memory_utilization": gpu_memory_utilization,
361
  "max_model_len": max_model_len,
362
  "script": "deepseek-ocr-vllm.py",
363
- "script_version": "2.0.0",
364
  "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py",
365
- "implementation": "vllm (batch processing, llm.generate + NGramPerReqLogitsProcessor)",
366
  }
367
- existing_info.append(new_info)
368
 
369
- # Add updated inference_info column
370
- info_json = json.dumps(existing_info, ensure_ascii=False)
371
- dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
372
 
373
  # Push to hub
374
  logger.info(f"Pushing to {output_dataset}")
@@ -376,7 +376,7 @@ def main(
376
  output_dataset,
377
  private=private,
378
  token=HF_TOKEN,
379
- config_name=config,
380
  create_pr=create_pr,
381
  commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
382
  + (f" [{config}]" if config else ""),
@@ -407,6 +407,17 @@ def main(
407
  )
408
  logger.info(f"Processing time: {processing_time_str}")
409
 
 
 
 
 
 
 
 
 
 
 
 
410
 
411
  if __name__ == "__main__":
412
  # Show example usage if no arguments
@@ -560,6 +571,11 @@ Examples:
560
  default=42,
561
  help="Random seed for shuffling (default: 42)",
562
  )
 
 
 
 
 
563
 
564
  args = parser.parse_args()
565
 
@@ -582,4 +598,5 @@ Examples:
582
  seed=args.seed,
583
  config=args.config,
584
  create_pr=args.create_pr,
 
585
  )
 
11
  # ]
12
  #
13
  # [[tool.uv.index]]
14
+ # url = "https://wheels.vllm.ai/nightly/cu129"
15
  #
16
  # [tool.uv]
17
  # prerelease = "allow"
 
215
  seed: int = 42,
216
  config: str = None,
217
  create_pr: bool = False,
218
+ verbose: bool = False,
219
  ):
220
  """Process images from HF dataset through DeepSeek-OCR model with vLLM."""
221
 
 
335
  # Handle inference_info tracking
336
  logger.info("Updating inference_info...")
337
 
338
+ inference_entry = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
  "model_id": model,
340
+ "model_name": "DeepSeek-OCR",
341
+ "column_name": "markdown",
342
+ "timestamp": datetime.now().isoformat(),
343
  "prompt_mode": prompt_mode if prompt is None else "custom",
344
  "batch_size": batch_size,
345
  "max_tokens": max_tokens,
346
  "gpu_memory_utilization": gpu_memory_utilization,
347
  "max_model_len": max_model_len,
348
  "script": "deepseek-ocr-vllm.py",
 
349
  "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py",
 
350
  }
 
351
 
352
+ if "inference_info" in dataset.column_names:
353
+ logger.info("Updating existing inference_info column")
354
+
355
+ def update_inference_info(example):
356
+ try:
357
+ existing_info = (
358
+ json.loads(example["inference_info"])
359
+ if example["inference_info"]
360
+ else []
361
+ )
362
+ except (json.JSONDecodeError, TypeError):
363
+ existing_info = []
364
+ existing_info.append(inference_entry)
365
+ return {"inference_info": json.dumps(existing_info)}
366
+
367
+ dataset = dataset.map(update_inference_info)
368
+ else:
369
+ logger.info("Creating new inference_info column")
370
+ inference_list = [json.dumps([inference_entry])] * len(dataset)
371
+ dataset = dataset.add_column("inference_info", inference_list)
372
 
373
  # Push to hub
374
  logger.info(f"Pushing to {output_dataset}")
 
376
  output_dataset,
377
  private=private,
378
  token=HF_TOKEN,
379
+ **({"config_name": config} if config else {}),
380
  create_pr=create_pr,
381
  commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
382
  + (f" [{config}]" if config else ""),
 
407
  )
408
  logger.info(f"Processing time: {processing_time_str}")
409
 
410
+ if verbose:
411
+ import importlib.metadata
412
+
413
+ logger.info("--- Resolved package versions ---")
414
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
415
+ try:
416
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
417
+ except importlib.metadata.PackageNotFoundError:
418
+ logger.info(f" {pkg}: not installed")
419
+ logger.info("--- End versions ---")
420
+
421
 
422
  if __name__ == "__main__":
423
  # Show example usage if no arguments
 
571
  default=42,
572
  help="Random seed for shuffling (default: 42)",
573
  )
574
+ parser.add_argument(
575
+ "--verbose",
576
+ action="store_true",
577
+ help="Log resolved package versions after processing (useful for pinning deps)",
578
+ )
579
 
580
  args = parser.parse_args()
581
 
 
598
  seed=args.seed,
599
  config=args.config,
600
  create_pr=args.create_pr,
601
+ verbose=args.verbose,
602
  )
deepseek-ocr2-vllm.py CHANGED
@@ -13,7 +13,7 @@
13
  # ]
14
  #
15
  # [[tool.uv.index]]
16
- # url = "https://wheels.vllm.ai/nightly"
17
  #
18
  # [tool.uv]
19
  # prerelease = "allow"
@@ -49,6 +49,7 @@ import json
49
  import logging
50
  import os
51
  import sys
 
52
  from datetime import datetime
53
  from typing import Any, Dict, Union
54
 
@@ -219,6 +220,10 @@ def main(
219
  private: bool = False,
220
  shuffle: bool = False,
221
  seed: int = 42,
 
 
 
 
222
  ):
223
  """Process images from HF dataset through DeepSeek-OCR-2 model with vLLM."""
224
 
@@ -332,52 +337,72 @@ def main(
332
  processing_duration = datetime.now() - start_time
333
  processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"
334
 
335
- # Add markdown column to dataset
336
- logger.info("Adding markdown column to dataset")
337
- dataset = dataset.add_column("markdown", all_markdown)
 
 
 
338
 
339
  # Handle inference_info tracking
340
- logger.info("Updating inference_info...")
341
-
342
- # Check for existing inference_info
343
- if "inference_info" in dataset.column_names:
344
- # Parse existing info from first row (all rows have same info)
345
- try:
346
- existing_info = json.loads(dataset[0]["inference_info"])
347
- if not isinstance(existing_info, list):
348
- existing_info = [existing_info] # Convert old format to list
349
- except (json.JSONDecodeError, TypeError):
350
- existing_info = []
351
- # Remove old column to update it
352
- dataset = dataset.remove_columns(["inference_info"])
353
- else:
354
- existing_info = []
355
-
356
- # Add new inference info
357
- new_info = {
358
- "column_name": "markdown",
359
  "model_id": model,
360
- "processing_date": datetime.now().isoformat(),
361
- "prompt": final_prompt,
 
362
  "prompt_mode": prompt_mode if prompt is None else "custom",
363
- "batch_size": batch_size,
364
  "max_tokens": max_tokens,
365
- "gpu_memory_utilization": gpu_memory_utilization,
366
- "max_model_len": max_model_len,
367
- "script": "deepseek-ocr2-vllm.py",
368
- "script_version": "1.0.0",
369
- "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr2-vllm.py",
370
- "implementation": "vllm (batch processing, llm.generate + NGramPerReqLogitsProcessor)",
371
  }
372
- existing_info.append(new_info)
373
 
374
- # Add updated inference_info column
375
- info_json = json.dumps(existing_info, ensure_ascii=False)
376
- dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
377
 
378
- # Push to hub
379
  logger.info(f"Pushing to {output_dataset}")
380
- dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
381
 
382
  # Create and push dataset card
383
  logger.info("Creating dataset card...")
@@ -404,6 +429,17 @@ def main(
404
  )
405
  logger.info(f"Processing time: {processing_time_str}")
406
 
 
 
 
 
 
 
 
 
 
 
 
407
 
408
  if __name__ == "__main__":
409
  # Show example usage if no arguments
@@ -529,6 +565,11 @@ Examples:
529
  parser.add_argument(
530
  "--private", action="store_true", help="Make output dataset private"
531
  )
 
 
 
 
 
532
  parser.add_argument(
533
  "--shuffle",
534
  action="store_true",
@@ -540,6 +581,20 @@ Examples:
540
  default=42,
541
  help="Random seed for shuffling (default: 42)",
542
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
543
 
544
  args = parser.parse_args()
545
 
@@ -560,4 +615,8 @@ Examples:
560
  private=args.private,
561
  shuffle=args.shuffle,
562
  seed=args.seed,
 
 
 
 
563
  )
 
13
  # ]
14
  #
15
  # [[tool.uv.index]]
16
+ # url = "https://wheels.vllm.ai/nightly/cu129"
17
  #
18
  # [tool.uv]
19
  # prerelease = "allow"
 
49
  import logging
50
  import os
51
  import sys
52
+ import time
53
  from datetime import datetime
54
  from typing import Any, Dict, Union
55
 
 
220
  private: bool = False,
221
  shuffle: bool = False,
222
  seed: int = 42,
223
+ output_column: str = "markdown",
224
+ config: str = None,
225
+ create_pr: bool = False,
226
+ verbose: bool = False,
227
  ):
228
  """Process images from HF dataset through DeepSeek-OCR-2 model with vLLM."""
229
 
 
337
  processing_duration = datetime.now() - start_time
338
  processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"
339
 
340
+ # Add output column to dataset
341
+ logger.info(f"Adding '{output_column}' column to dataset")
342
+ if output_column in dataset.column_names:
343
+ logger.warning(f"Column '{output_column}' already exists, replacing it")
344
+ dataset = dataset.remove_columns([output_column])
345
+ dataset = dataset.add_column(output_column, all_markdown)
346
 
347
  # Handle inference_info tracking
348
+ inference_entry = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
349
  "model_id": model,
350
+ "model_name": "DeepSeek-OCR-2",
351
+ "column_name": output_column,
352
+ "timestamp": datetime.now().isoformat(),
353
  "prompt_mode": prompt_mode if prompt is None else "custom",
 
354
  "max_tokens": max_tokens,
 
 
 
 
 
 
355
  }
 
356
 
357
+ if "inference_info" in dataset.column_names:
358
+ logger.info("Updating existing inference_info column")
359
+
360
+ def update_inference_info(example):
361
+ try:
362
+ existing_info = (
363
+ json.loads(example["inference_info"])
364
+ if example["inference_info"]
365
+ else []
366
+ )
367
+ except (json.JSONDecodeError, TypeError):
368
+ existing_info = []
369
+ existing_info.append(inference_entry)
370
+ return {"inference_info": json.dumps(existing_info)}
371
+
372
+ dataset = dataset.map(update_inference_info)
373
+ else:
374
+ logger.info("Creating new inference_info column")
375
+ inference_list = [json.dumps([inference_entry])] * len(dataset)
376
+ dataset = dataset.add_column("inference_info", inference_list)
377
 
378
+ # Push to hub with retry and XET fallback
379
  logger.info(f"Pushing to {output_dataset}")
380
+ max_retries = 3
381
+ for attempt in range(1, max_retries + 1):
382
+ try:
383
+ if attempt > 1:
384
+ logger.warning("Disabling XET (fallback to HTTP upload)")
385
+ os.environ["HF_HUB_DISABLE_XET"] = "1"
386
+ dataset.push_to_hub(
387
+ output_dataset,
388
+ private=private,
389
+ token=HF_TOKEN,
390
+ max_shard_size="500MB",
391
+ **({"config_name": config} if config else {}),
392
+ create_pr=create_pr,
393
+ commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
394
+ + (f" [{config}]" if config else ""),
395
+ )
396
+ break
397
+ except Exception as e:
398
+ logger.error(f"Upload attempt {attempt}/{max_retries} failed: {e}")
399
+ if attempt < max_retries:
400
+ delay = 30 * (2 ** (attempt - 1))
401
+ logger.info(f"Retrying in {delay}s...")
402
+ time.sleep(delay)
403
+ else:
404
+ logger.error("All upload attempts failed. OCR results are lost.")
405
+ sys.exit(1)
406
 
407
  # Create and push dataset card
408
  logger.info("Creating dataset card...")
 
429
  )
430
  logger.info(f"Processing time: {processing_time_str}")
431
 
432
+ if verbose:
433
+ import importlib.metadata
434
+
435
+ logger.info("--- Resolved package versions ---")
436
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
437
+ try:
438
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
439
+ except importlib.metadata.PackageNotFoundError:
440
+ logger.info(f" {pkg}: not installed")
441
+ logger.info("--- End versions ---")
442
+
443
 
444
  if __name__ == "__main__":
445
  # Show example usage if no arguments
 
565
  parser.add_argument(
566
  "--private", action="store_true", help="Make output dataset private"
567
  )
568
+ parser.add_argument(
569
+ "--output-column",
570
+ default="markdown",
571
+ help="Column name for OCR output (default: markdown). Use a different name to add alongside existing OCR.",
572
+ )
573
  parser.add_argument(
574
  "--shuffle",
575
  action="store_true",
 
581
  default=42,
582
  help="Random seed for shuffling (default: 42)",
583
  )
584
+ parser.add_argument(
585
+ "--config",
586
+ help="Config/subset name when pushing to Hub (for benchmarking multiple models in one repo)",
587
+ )
588
+ parser.add_argument(
589
+ "--create-pr",
590
+ action="store_true",
591
+ help="Create a pull request instead of pushing directly (for parallel benchmarking)",
592
+ )
593
+ parser.add_argument(
594
+ "--verbose",
595
+ action="store_true",
596
+ help="Log resolved package versions after processing (useful for pinning deps)",
597
+ )
598
 
599
  args = parser.parse_args()
600
 
 
615
  private=args.private,
616
  shuffle=args.shuffle,
617
  seed=args.seed,
618
+ output_column=args.output_column,
619
+ config=args.config,
620
+ create_pr=args.create_pr,
621
+ verbose=args.verbose,
622
  )
dots-ocr-1.5.py ADDED
@@ -0,0 +1,667 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.11"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "huggingface-hub",
6
+ # "pillow",
7
+ # "vllm>=0.9.1",
8
+ # "tqdm",
9
+ # "toolz",
10
+ # "torch",
11
+ # ]
12
+ #
13
+ # ///
14
+
15
+ """
16
+ Convert document images to markdown using DoTS.ocr-1.5 with vLLM.
17
+
18
+ DoTS.ocr-1.5 is a 3B multilingual document parsing model with SOTA performance
19
+ on 100+ languages. Compared to v1 (1.7B), it adds web screen parsing, scene text
20
+ spotting, SVG code generation, and stronger multilingual document parsing.
21
+
22
+ Features:
23
+ - Multilingual support (100+ languages)
24
+ - Table extraction and formatting
25
+ - Formula recognition
26
+ - Layout-aware text extraction
27
+ - Web screen parsing (NEW in v1.5)
28
+ - Scene text spotting (NEW in v1.5)
29
+ - SVG code generation (requires dots.ocr-1.5-svg variant)
30
+
31
+ Model: rednote-hilab/dots.ocr-1.5
32
+ vLLM: Officially supported (same DotsOCRForCausalLM architecture as v1)
33
+ """
34
+
35
+ import argparse
36
+ import base64
37
+ import io
38
+ import json
39
+ import logging
40
+ import os
41
+ import sys
42
+ import time
43
+ from datetime import datetime
44
+ from typing import Any, Dict, List, Union
45
+
46
+ import torch
47
+ from datasets import load_dataset
48
+ from huggingface_hub import DatasetCard, login
49
+ from PIL import Image
50
+ from toolz import partition_all
51
+ from tqdm.auto import tqdm
52
+ from vllm import LLM, SamplingParams
53
+
54
+ logging.basicConfig(level=logging.INFO)
55
+ logger = logging.getLogger(__name__)
56
+
57
+
58
# ────────────────────────────────────────────────────────────────
# DoTS OCR 1.5 Prompt Templates (from official dots.ocr repo)
# Source: https://github.com/rednote-hilab/dots.ocr/blob/master/dots_ocr/utils/prompts.py
# ────────────────────────────────────────────────────────────────

# Maps each --prompt-mode CLI value to the instruction text sent to the model.
# Prompt strings are copied verbatim from the upstream repo (see Source link
# above) — keep them byte-identical when editing this file.
PROMPT_TEMPLATES = {
    # Plain text extraction (the default mode).
    "ocr": """Extract the text content from this image.""",
    # Full layout analysis: bbox + category + text per element, one JSON object.
    "layout-all": """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.

1. Bbox format: [x1, y1, x2, y2]

2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'].

3. Text Extraction & Formatting Rules:
- Picture: For the 'Picture' category, the text field should be omitted.
- Formula: Format its text as LaTeX.
- Table: Format its text as HTML.
- All Others (Text, Title, etc.): Format their text as Markdown.

4. Constraints:
- The output text must be the original text from the image, with no translation.
- All layout elements must be sorted according to human reading order.

5. Final Output: The entire output must be a single JSON object.
""",
    # Layout detection only (bbox + category, no text), JSON output.
    "layout-only": """Please output the layout information from this PDF image, including each layout's bbox and its category. The bbox should be in the format [x1, y1, x2, y2]. The layout categories for the PDF document include ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. Do not output the corresponding text. The layout result should be in JSON format.""",
    # NEW in v1.5:
    # Webpage screenshot layout parsing (JSON).
    "web-parsing": """Parsing the layout info of this webpage image with format json:\n""",
    # Scene text detection + recognition in natural images.
    "scene-spotting": """Detect and recognize the text in the image.""",
    # Region-constrained OCR; caller appends the bbox after this prompt.
    "grounding-ocr": """Extract text from the given bounding box on the image (format: [x1, y1, x2, y2]).\nBounding Box:\n""",
    # Free-form / no-instruction mode (single space, intentionally).
    "general": """ """,
}
90
+
91
+
92
def check_cuda_availability():
    """Verify a CUDA GPU is visible to torch; exit with status 1 otherwise."""
    if torch.cuda.is_available():
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
        return
    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Please run on a machine with a CUDA-capable GPU.")
    sys.exit(1)
100
+
101
+
102
def make_ocr_message(
    image: Union[Image.Image, Dict[str, Any], str],
    prompt: str = PROMPT_TEMPLATES["ocr"],
) -> List[Dict]:
    """Build a single-turn vLLM chat message pairing *image* with *prompt*.

    Accepts a PIL image, a dict carrying raw ``bytes``, or a filesystem path.
    The image is normalized to RGB and embedded inline as a base64 PNG data
    URI, which is the image format vLLM's chat API accepts.

    Raises:
        ValueError: if *image* is none of the supported types.
    """
    # Normalize the three accepted input forms to a PIL image.
    if isinstance(image, Image.Image):
        img = image
    elif isinstance(image, dict) and "bytes" in image:
        img = Image.open(io.BytesIO(image["bytes"]))
    elif isinstance(image, str):
        img = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    # Encode as an RGB PNG data URI for inline transport.
    buffer = io.BytesIO()
    img.convert("RGB").save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode()
    data_uri = f"data:image/png;base64,{encoded}"

    # One user turn: the image followed by the text instruction.
    content = [
        {"type": "image_url", "image_url": {"url": data_uri}},
        {"type": "text", "text": prompt},
    ]
    return [{"role": "user", "content": content}]
135
+
136
+
137
def create_dataset_card(
    source_dataset: str,
    model: str,
    num_samples: int,
    processing_time: str,
    batch_size: int,
    max_model_len: int,
    max_tokens: int,
    gpu_memory_utilization: float,
    image_column: str = "image",
    split: str = "train",
    prompt_mode: str = "ocr",
) -> str:
    """Render the markdown dataset card documenting this OCR run.

    Returns the full card text (YAML front matter + body) describing the
    source dataset, model, configuration, and reproduction command.
    """
    # Short model name (e.g. "dots.ocr-1.5") for the card title.
    short_name = model.split("/")[-1]
    # Timestamp the card at render time.
    generated_at = datetime.now().strftime("%Y-%m-%d %H:%M UTC")

    return f"""---
tags:
- ocr
- document-processing
- dots-ocr-1.5
- multilingual
- markdown
- uv-script
- generated
---

# Document OCR using {short_name}

This dataset contains OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using DoTS.ocr-1.5, a 3B multilingual model with SOTA document parsing.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {generated_at}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `markdown`
- **Dataset Split**: `{split}`
- **Batch Size**: {batch_size}
- **Prompt Mode**: {prompt_mode}
- **Max Model Length**: {max_model_len:,} tokens
- **Max Output Tokens**: {max_tokens:,}
- **GPU Memory Utilization**: {gpu_memory_utilization:.1%}

## Model Information

DoTS.ocr-1.5 is a 3B multilingual document parsing model that excels at:
- 100+ Languages — Multilingual document support
- Table extraction — Structured data recognition
- Formulas — Mathematical notation preservation
- Layout-aware — Reading order and structure preservation
- Web screen parsing — Webpage layout analysis
- Scene text spotting — Text detection in natural scenes

## Dataset Structure

The dataset contains all original columns plus:
- `markdown`: The extracted text in markdown format
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Usage

```python
from datasets import load_dataset
import json

# Load the dataset
dataset = load_dataset("{{output_dataset_id}}", split="{split}")

# Access the markdown text
for example in dataset:
    print(example["markdown"])
    break

# View all OCR models applied to this dataset
inference_info = json.loads(dataset[0]["inference_info"])
for info in inference_info:
    print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
```

## Reproduction

This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) DoTS OCR 1.5 script:

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr-1.5.py \\
    {source_dataset} \\
    <output-dataset> \\
    --image-column {image_column} \\
    --batch-size {batch_size} \\
    --prompt-mode {prompt_mode} \\
    --max-model-len {max_model_len} \\
    --max-tokens {max_tokens} \\
    --gpu-memory-utilization {gpu_memory_utilization}
```

Generated with [UV Scripts](https://huggingface.co/uv-scripts)
"""
241
+
242
+
243
def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    batch_size: int = 16,
    model: str = "rednote-hilab/dots.ocr-1.5",
    max_model_len: int = 24000,
    max_tokens: int = 24000,
    gpu_memory_utilization: float = 0.9,
    hf_token: str | None = None,
    split: str = "train",
    max_samples: int | None = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
    prompt_mode: str = "ocr",
    custom_prompt: str | None = None,
    output_column: str = "markdown",
    config: str | None = None,
    create_pr: bool = False,
    temperature: float = 0.1,
    top_p: float = 0.9,
    verbose: bool = False,
):
    """Process images from HF dataset through DoTS.ocr-1.5 model.

    Loads `input_dataset`, runs batched OCR with vLLM, appends the results as
    `output_column` plus an `inference_info` provenance column, then pushes
    the dataset (with retry and XET fallback) and a dataset card to
    `output_dataset` on the Hub.

    Exits via sys.exit(1) when CUDA is unavailable or every upload attempt
    fails. Raises ValueError if `image_column` is missing from the dataset.
    """
    # Check CUDA availability first — fail fast before any download/compile.
    check_cuda_availability()

    # Track processing start time
    start_time = datetime.now()

    # Login to HF if token provided (arg takes precedence over env var)
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Determine prompt to use: explicit custom prompt wins over prompt_mode.
    if custom_prompt:
        prompt = custom_prompt
        logger.info(f"Using custom prompt: {prompt[:50]}...")
    else:
        # Unknown modes silently fall back to plain "ocr".
        prompt = PROMPT_TEMPLATES.get(prompt_mode, PROMPT_TEMPLATES["ocr"])
        logger.info(f"Using prompt mode: {prompt_mode}")

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    # Validate image column before paying for model startup.
    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Shuffle if requested
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    # Limit samples if requested
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Initialize vLLM model
    logger.info(f"Initializing vLLM with model: {model}")
    logger.info("This may take a few minutes on first run...")
    llm = LLM(
        model=model,
        trust_remote_code=True,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
    )

    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
    )

    logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
    logger.info(f"Output will be written to column: {output_column}")

    # Process images in batches; all_outputs stays aligned with dataset rows.
    all_outputs = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="DoTS.ocr-1.5 processing",
    ):
        batch_indices = list(batch_indices)
        batch_images = [dataset[i][image_column] for i in batch_indices]

        try:
            # Create messages for batch
            batch_messages = [make_ocr_message(img, prompt) for img in batch_images]

            # Process with vLLM
            outputs = llm.chat(batch_messages, sampling_params)

            # Extract outputs
            for output in outputs:
                text = output.outputs[0].text.strip()
                all_outputs.append(text)

        except Exception as e:
            logger.error(f"Error processing batch: {e}")
            # Add error placeholders for the whole failed batch so the
            # output column length still matches the dataset length.
            all_outputs.extend(["[OCR ERROR]"] * len(batch_images))

    # Calculate processing time
    processing_duration = datetime.now() - start_time
    processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"

    # Add output column to dataset. Replace an existing column of the same
    # name instead of letting add_column raise on a duplicate (consistent
    # with deepseek-ocr2-vllm.py) — otherwise a re-run against an already
    # OCR'd dataset crashes here after the full inference pass.
    logger.info(f"Adding '{output_column}' column to dataset")
    if output_column in dataset.column_names:
        logger.warning(f"Column '{output_column}' already exists, replacing it")
        dataset = dataset.remove_columns([output_column])
    dataset = dataset.add_column(output_column, all_outputs)

    # Handle inference_info tracking (for multi-model comparisons)
    inference_entry = {
        "model_id": model,
        "model_name": "DoTS.ocr-1.5",
        "column_name": output_column,
        "timestamp": datetime.now().isoformat(),
        "prompt_mode": prompt_mode if not custom_prompt else "custom",
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
    }

    if "inference_info" in dataset.column_names:
        # Append to existing inference info
        logger.info("Updating existing inference_info column")

        def update_inference_info(example):
            # Tolerate empty/corrupt JSON by starting a fresh list.
            try:
                existing_info = (
                    json.loads(example["inference_info"])
                    if example["inference_info"]
                    else []
                )
            except (json.JSONDecodeError, TypeError):
                existing_info = []

            existing_info.append(inference_entry)
            return {"inference_info": json.dumps(existing_info)}

        dataset = dataset.map(update_inference_info)
    else:
        # Create new inference_info column
        logger.info("Creating new inference_info column")
        inference_list = [json.dumps([inference_entry])] * len(dataset)
        dataset = dataset.add_column("inference_info", inference_list)

    # Push to hub with retry and XET fallback
    logger.info(f"Pushing to {output_dataset}")
    max_retries = 3
    for attempt in range(1, max_retries + 1):
        try:
            if attempt > 1:
                # XET uploads have been flaky; fall back to plain HTTP on retry.
                logger.warning("Disabling XET (fallback to HTTP upload)")
                os.environ["HF_HUB_DISABLE_XET"] = "1"
            dataset.push_to_hub(
                output_dataset,
                private=private,
                token=HF_TOKEN,
                max_shard_size="500MB",
                # Only pass config_name when set: config_name=None produces
                # invalid YAML (config_name: null) that fails Hub validation.
                **({"config_name": config} if config else {}),
                create_pr=create_pr,
                commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
                + (f" [{config}]" if config else ""),
            )
            break
        except Exception as e:
            logger.error(f"Upload attempt {attempt}/{max_retries} failed: {e}")
            if attempt < max_retries:
                # Exponential backoff: 30s, 60s, ...
                delay = 30 * (2 ** (attempt - 1))
                logger.info(f"Retrying in {delay}s...")
                time.sleep(delay)
            else:
                logger.error("All upload attempts failed. OCR results are lost.")
                sys.exit(1)

    # Create and push dataset card
    logger.info("Creating dataset card")
    card_content = create_dataset_card(
        source_dataset=input_dataset,
        model=model,
        num_samples=len(dataset),
        processing_time=processing_time_str,
        batch_size=batch_size,
        max_model_len=max_model_len,
        max_tokens=max_tokens,
        gpu_memory_utilization=gpu_memory_utilization,
        image_column=image_column,
        split=split,
        prompt_mode=prompt_mode if not custom_prompt else "custom",
    )

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset, token=HF_TOKEN)

    logger.info("DoTS.ocr-1.5 processing complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )
    logger.info(f"Processing time: {processing_time_str}")

    if verbose:
        import importlib.metadata

        # Log the versions uv actually resolved — useful for pinning deps.
        logger.info("--- Resolved package versions ---")
        for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
            try:
                logger.info(f"  {pkg}=={importlib.metadata.version(pkg)}")
            except importlib.metadata.PackageNotFoundError:
                logger.info(f"  {pkg}: not installed")
        logger.info("--- End versions ---")
463
+
464
+
465
if __name__ == "__main__":
    # Show example usage if no arguments (friendlier than argparse's error).
    if len(sys.argv) == 1:
        print("=" * 80)
        print("DoTS.ocr-1.5 Document Processing")
        print("=" * 80)
        print("\n3B multilingual OCR model supporting 100+ languages")
        print("\nFeatures:")
        print("- Multilingual support (100+ languages)")
        print("- Fast processing with vLLM")
        print("- Table extraction and formatting")
        print("- Formula recognition")
        print("- Layout-aware text extraction")
        print("- Web screen parsing (NEW in v1.5)")
        print("- Scene text spotting (NEW in v1.5)")
        print("\nPrompt modes:")
        print(" ocr - Text extraction (default)")
        print(" layout-all - Layout + bboxes + text (JSON)")
        print(" layout-only - Layout + bboxes only (JSON)")
        print(" web-parsing - Webpage layout analysis (JSON)")
        print(" scene-spotting - Scene text detection")
        print(" grounding-ocr - Text from bounding box region")
        print(" general - Free-form (use with --custom-prompt)")
        print("\nExample usage:")
        print("\n1. Basic OCR:")
        print(" uv run dots-ocr-1.5.py input-dataset output-dataset")
        print("\n2. Web screen parsing:")
        print(" uv run dots-ocr-1.5.py screenshots parsed --prompt-mode web-parsing")
        print("\n3. Scene text spotting:")
        print(" uv run dots-ocr-1.5.py photos detected --prompt-mode scene-spotting")
        print("\n4. Layout analysis with structure:")
        print(" uv run dots-ocr-1.5.py papers analyzed --prompt-mode layout-all")
        print("\n5. Running on HF Jobs:")
        print(" hf jobs uv run --flavor l4x1 \\")
        print(" -s HF_TOKEN \\")
        print(
            " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr-1.5.py \\"
        )
        print(" input-dataset output-dataset")
        print("\n" + "=" * 80)
        print("\nFor full help, run: uv run dots-ocr-1.5.py --help")
        sys.exit(0)

    parser = argparse.ArgumentParser(
        description="Document OCR using DoTS.ocr-1.5 (3B multilingual model)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Prompt Modes (official DoTS.ocr-1.5 prompts):
  ocr - Simple text extraction (default)
  layout-all - Layout analysis with bboxes, categories, and text (JSON output)
  layout-only - Layout detection with bboxes and categories only (JSON output)
  web-parsing - Webpage layout analysis (JSON output) [NEW in v1.5]
  scene-spotting - Scene text detection and recognition [NEW in v1.5]
  grounding-ocr - Extract text from bounding box region [NEW in v1.5]
  general - Free-form QA (use with --custom-prompt) [NEW in v1.5]

SVG Code Generation:
  For SVG output, use --model rednote-hilab/dots.ocr-1.5-svg with:
  --custom-prompt 'Please generate the SVG code based on the image.'

Examples:
  # Basic text OCR (default)
  uv run dots-ocr-1.5.py my-docs analyzed-docs

  # Web screen parsing
  uv run dots-ocr-1.5.py screenshots parsed --prompt-mode web-parsing

  # Scene text spotting
  uv run dots-ocr-1.5.py photos spotted --prompt-mode scene-spotting

  # Full layout analysis with structure
  uv run dots-ocr-1.5.py papers structured --prompt-mode layout-all

  # Random sampling for testing
  uv run dots-ocr-1.5.py large-dataset test --max-samples 50 --shuffle
""",
    )

    # Positional arguments: source and destination dataset repo ids.
    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=16,
        help="Batch size for processing (default: 16)",
    )
    parser.add_argument(
        "--model",
        default="rednote-hilab/dots.ocr-1.5",
        help="Model to use (default: rednote-hilab/dots.ocr-1.5)",
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=24000,
        help="Maximum model context length (default: 24000)",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=24000,
        help="Maximum tokens to generate (default: 24000)",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.9,
        help="GPU memory utilization (default: 0.9)",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--shuffle", action="store_true", help="Shuffle dataset before processing"
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )
    # Valid choices come from the module-level PROMPT_TEMPLATES mapping.
    parser.add_argument(
        "--prompt-mode",
        choices=list(PROMPT_TEMPLATES.keys()),
        default="ocr",
        help=f"Prompt template to use: {', '.join(PROMPT_TEMPLATES.keys())} (default: ocr)",
    )
    parser.add_argument(
        "--custom-prompt",
        help="Custom prompt text (overrides --prompt-mode)",
    )
    parser.add_argument(
        "--output-column",
        default="markdown",
        help="Column name for output text (default: markdown)",
    )
    parser.add_argument(
        "--config",
        help="Config/subset name when pushing to Hub (for benchmarking multiple models in one repo)",
    )
    parser.add_argument(
        "--create-pr",
        action="store_true",
        help="Create a pull request instead of pushing directly (for parallel benchmarking)",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.1,
        help="Sampling temperature (default: 0.1, per official recommendation)",
    )
    parser.add_argument(
        "--top-p",
        type=float,
        default=0.9,
        help="Top-p sampling (default: 0.9, per official recommendation)",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Log resolved package versions after processing (useful for pinning deps)",
    )

    args = parser.parse_args()

    # Forward every CLI flag to main() by keyword.
    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        batch_size=args.batch_size,
        model=args.model,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        gpu_memory_utilization=args.gpu_memory_utilization,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
        prompt_mode=args.prompt_mode,
        custom_prompt=args.custom_prompt,
        output_column=args.output_column,
        config=args.config,
        create_pr=args.create_pr,
        temperature=args.temperature,
        top_p=args.top_p,
        verbose=args.verbose,
    )
dots-ocr.py CHANGED
@@ -383,7 +383,7 @@ def main(
383
  output_dataset,
384
  private=private,
385
  token=HF_TOKEN,
386
- config_name=config,
387
  create_pr=create_pr,
388
  commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
389
  + (f" [{config}]" if config else ""),
 
383
  output_dataset,
384
  private=private,
385
  token=HF_TOKEN,
386
+ **({"config_name": config} if config else {}),
387
  create_pr=create_pr,
388
  commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
389
  + (f" [{config}]" if config else ""),
glm-ocr.py CHANGED
@@ -383,7 +383,7 @@ def main(
383
  private=private,
384
  token=HF_TOKEN,
385
  max_shard_size="500MB",
386
- config_name=config,
387
  create_pr=create_pr,
388
  commit_message=f"Add {MODEL} OCR results ({len(dataset)} samples)"
389
  + (f" [{config}]" if config else ""),
 
383
  private=private,
384
  token=HF_TOKEN,
385
  max_shard_size="500MB",
386
+ **({"config_name": config} if config else {}),
387
  create_pr=create_pr,
388
  commit_message=f"Add {MODEL} OCR results ({len(dataset)} samples)"
389
  + (f" [{config}]" if config else ""),
hunyuan-ocr.py ADDED
@@ -0,0 +1,845 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.11"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "huggingface-hub",
6
+ # "pillow",
7
+ # "vllm",
8
+ # "tqdm",
9
+ # "toolz",
10
+ # "torch",
11
+ # ]
12
+ #
13
+ # [[tool.uv.index]]
14
+ # url = "https://wheels.vllm.ai/nightly/cu129"
15
+ #
16
+ # [tool.uv]
17
+ # prerelease = "allow"
18
+ # ///
19
+
20
+ """
21
+ Convert document images to markdown using HunyuanOCR with vLLM.
22
+
23
+ HunyuanOCR is a lightweight 1B parameter VLM from Tencent designed for complex
24
+ multilingual document parsing. This script uses vLLM for processing.
25
+
26
+ Features:
27
+ - 📝 Full document parsing to markdown
28
+ - 📊 Table extraction (HTML format)
29
+ - 📐 Formula recognition (LaTeX format)
30
+ - 📍 Text spotting with coordinates
31
+ - 🔍 Information extraction (key-value, fields, subtitles)
32
+ - 🌐 Photo translation
33
+ - 🎯 Compact model (1B parameters)
34
+
35
+ Model: tencent/HunyuanOCR
36
+ vLLM: Requires nightly build (trust_remote_code=True)
37
+
38
+ Note: Due to vLLM V1 engine batching issues with HunyuanOCR, batch_size defaults to 1.
39
+ """
40
+
41
+ import argparse
42
+ import base64
43
+ import io
44
+ import json
45
+ import logging
46
+ import os
47
+ import sys
48
+ import time
49
+ from datetime import datetime
50
+ from typing import Any, Dict, List, Union
51
+
52
+ import torch
53
+ from datasets import load_dataset
54
+ from huggingface_hub import DatasetCard, login
55
+ from PIL import Image
56
+ from toolz import partition_all
57
+ from tqdm.auto import tqdm
58
+ from vllm import LLM, SamplingParams
59
+
60
+ logging.basicConfig(level=logging.INFO)
61
+ logger = logging.getLogger(__name__)
62
+
63
+
64
+ # ────────────────────────────────────────────────────────────────
65
+ # HunyuanOCR Prompt Templates (from official README)
66
+ # Source: https://huggingface.co/tencent/HunyuanOCR
67
+ # ────────────────────────────────────────────────────────────────
68
+
69
# Official HunyuanOCR task prompts, keyed by mode then language ("en"/"cn").
# Runtime strings are kept verbatim from the model card — the model was tuned
# on these exact phrasings, so do not edit them.
PROMPT_TEMPLATES = {
    # Parsing prompts
    "parse-document": {
        "en": "Extract all information from the main body of the document image and represent it in markdown format, ignoring headers and footers. Tables should be expressed in HTML format, formulas in the document should be represented using LaTeX format, and the parsing should be organized according to the reading order.",
        "cn": "提取文档图片中正文的所有信息用 markdown 格式表示,其中页眉、页脚部分忽略,表格用 html 格式表达,文档中公式用 latex 格式表示,按照阅读顺序组织进行解析。",
    },
    "parse-formula": {
        "en": "Identify the formula in the image and represent it using LaTeX format.",
        "cn": "识别图片中的公式,用 LaTeX 格式表示。",
    },
    "parse-table": {
        "en": "Parse the table in the image into HTML.",
        "cn": "把图中的表格解析为 HTML。",
    },
    "parse-chart": {
        "en": "Parse the chart in the image; use Mermaid format for flowcharts and Markdown for other charts.",
        "cn": "解析图中的图表,对于流程图使用 Mermaid 格式表示,其他图表使用 Markdown 格式表示。",
    },
    # Spotting prompt
    "spot": {
        "en": "Detect and recognize text in the image, and output the text coordinates in a formatted manner.",
        "cn": "检测并识别图片中的文字,将文本坐标格式化输出。",
    },
    # Extraction prompts
    "extract-subtitles": {
        "en": "Extract the subtitles from the image.",
        "cn": "提取图片中的字幕。",
    },
    # Translation prompt (requires target_language substitution)
    "translate": {
        "en": "First extract the text, then translate the text content into {target_language}. If it is a document, ignore the header and footer. Formulas should be represented in LaTeX format, and tables should be represented in HTML format.",
        "cn": "先提取文字,再将文字内容翻译为{target_language}。若是文档,则其中页眉、页脚忽略。公式用latex格式表示,表格用html格式表示。",
    },
}

# Templates that require dynamic substitution (see get_prompt, which fills
# {key} / {fields} via str.format).
EXTRACT_KEY_TEMPLATE = {
    "en": "Output the value of {key}.",
    "cn": "输出 {key} 的值。",
}

EXTRACT_FIELDS_TEMPLATE = {
    "en": "Extract the content of the fields: {fields} from the image and return it in JSON format.",
    "cn": "提取图片中的: {fields} 的字段内容,并按照 JSON 格式返回。",
}
114
+
115
+
116
def clean_repeated_substrings(text: str, threshold: int = 10) -> str:
    """Truncate degenerate trailing repetition in very long OCR outputs.

    HunyuanOCR can fall into a repeating loop on long generations. For
    outputs longer than 8000 characters, scan the final 4000 characters for
    a trailing substring (10-199 chars) occurring at least ``threshold``
    times; if one is found, cut the text just after its first occurrence.

    Cleanup utility recommended by the model authors:
    https://huggingface.co/tencent/HunyuanOCR
    """
    # Short outputs are assumed clean — return unchanged.
    if len(text) <= 8000:
        return text

    # Only the tail is inspected for repetition.
    tail = text[-4000:]

    # Probe trailing patterns from short to long; the first one that repeats
    # often enough determines the truncation point.
    for size in range(10, 200):
        candidate = tail[-size:]
        if tail.count(candidate) >= threshold:
            start = text.find(candidate)
            if start != -1:
                # Keep everything up to and including the first occurrence.
                return text[: start + len(candidate)]

    return text
143
+
144
+
145
def check_cuda_availability():
    """Exit the process unless a CUDA-capable GPU is present.

    vLLM inference in this script needs a GPU; failing fast here produces a
    clearer message than a deep vLLM initialization error would.
    """
    if torch.cuda.is_available():
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
        return
    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Please run on a machine with a CUDA-capable GPU.")
    sys.exit(1)
153
+
154
+
155
def get_prompt(
    prompt_mode: str,
    use_chinese: bool = False,
    target_language: str = None,
    key: str = None,
    fields: List[str] = None,
) -> str:
    """Resolve the prompt text for *prompt_mode*.

    Parametrized modes (``extract-key``, ``extract-fields``, ``translate``)
    substitute their caller-supplied value into the matching template; every
    other mode is looked up directly in PROMPT_TEMPLATES. English wording is
    used unless ``use_chinese`` is set.

    Raises:
        ValueError: when a required parameter for the mode is missing, or
            the mode is not recognized.
    """
    lang = "cn" if use_chinese else "en"

    # Modes that substitute a caller-supplied value into their template.
    if prompt_mode == "extract-key":
        if not key:
            raise ValueError("--key is required for extract-key mode")
        return EXTRACT_KEY_TEMPLATE[lang].format(key=key)

    if prompt_mode == "extract-fields":
        if not fields:
            raise ValueError("--fields is required for extract-fields mode")
        # The field list is rendered via str() to mirror the official usage.
        return EXTRACT_FIELDS_TEMPLATE[lang].format(fields=str(fields))

    if prompt_mode == "translate":
        if not target_language:
            raise ValueError("--target-language is required for translate mode")
        return PROMPT_TEMPLATES["translate"][lang].format(
            target_language=target_language
        )

    # Static prompts: direct lookup.
    if prompt_mode not in PROMPT_TEMPLATES:
        raise ValueError(
            f"Unknown prompt mode: {prompt_mode}. "
            f"Available: {list(PROMPT_TEMPLATES.keys()) + ['extract-key', 'extract-fields']}"
        )

    return PROMPT_TEMPLATES[prompt_mode][lang]
191
+
192
+
193
def make_ocr_message(
    image: Union[Image.Image, Dict[str, Any], str],
    prompt: str,
) -> List[Dict]:
    """Build a single-turn vLLM chat message pairing *image* with *prompt*.

    Accepts a PIL image, a HF-datasets dict carrying raw ``bytes``, or a
    file path. The image is normalized to RGB and embedded as a base64 PNG
    data URI. Per the HunyuanOCR chat convention, the image part precedes
    the text part.

    Raises:
        ValueError: for any unsupported *image* type.
    """
    # Normalize the input into a PIL image.
    if isinstance(image, Image.Image):
        img = image
    elif isinstance(image, dict) and "bytes" in image:
        img = Image.open(io.BytesIO(image["bytes"]))
    elif isinstance(image, str):
        img = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    # Re-encode as PNG (RGB) and wrap in a data URI.
    encoded = io.BytesIO()
    img.convert("RGB").save(encoded, format="PNG")
    b64 = base64.b64encode(encoded.getvalue()).decode()
    data_uri = f"data:image/png;base64,{b64}"

    # Image first, then the instruction text.
    content = [
        {"type": "image_url", "image_url": {"url": data_uri}},
        {"type": "text", "text": prompt},
    ]
    return [{"role": "user", "content": content}]
226
+
227
+
228
def create_dataset_card(
    source_dataset: str,
    model: str,
    num_samples: int,
    processing_time: str,
    batch_size: int,
    max_model_len: int,
    max_tokens: int,
    gpu_memory_utilization: float,
    image_column: str = "image",
    split: str = "train",
    prompt_mode: str = "parse-document",
    use_chinese: bool = False,
) -> str:
    """Render the README/dataset-card markdown for the output dataset.

    Returns a single markdown string (YAML front matter + body) documenting
    the source dataset, model, and run configuration; pushed to the Hub by
    main() via huggingface_hub.DatasetCard.

    NOTE(review): the usage snippet below emits the literal placeholder
    ``{output_dataset_id}`` (escaped as ``{{...}}`` in the f-string) because
    this function never receives the output repo id — confirm whether a
    caller is expected to substitute it.
    """
    # Short display name, e.g. "HunyuanOCR" from "tencent/HunyuanOCR".
    model_name = model.split("/")[-1]

    return f"""---
tags:
- ocr
- document-processing
- hunyuan-ocr
- multilingual
- markdown
- uv-script
- generated
---

# Document OCR using {model_name}

This dataset contains OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using HunyuanOCR, a lightweight 1B VLM from Tencent.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `markdown`
- **Dataset Split**: `{split}`
- **Batch Size**: {batch_size}
- **Prompt Mode**: {prompt_mode}
- **Prompt Language**: {"Chinese" if use_chinese else "English"}
- **Max Model Length**: {max_model_len:,} tokens
- **Max Output Tokens**: {max_tokens:,}
- **GPU Memory Utilization**: {gpu_memory_utilization:.1%}

## Model Information

HunyuanOCR is a lightweight 1B VLM that excels at:
- 📝 **Document Parsing** - Full markdown extraction with reading order
- 📊 **Table Extraction** - HTML format tables
- 📐 **Formula Recognition** - LaTeX format formulas
- 📈 **Chart Parsing** - Mermaid/Markdown format
- 📍 **Text Spotting** - Detection with coordinates
- 🔍 **Information Extraction** - Key-value, fields, subtitles
- 🌐 **Translation** - Multilingual photo translation

## Prompt Modes Available

- `parse-document` - Full document parsing (default)
- `parse-formula` - LaTeX formula extraction
- `parse-table` - HTML table extraction
- `parse-chart` - Chart/flowchart parsing
- `spot` - Text detection with coordinates
- `extract-key` - Extract specific key value
- `extract-fields` - Extract multiple fields as JSON
- `extract-subtitles` - Subtitle extraction
- `translate` - Document translation

## Dataset Structure

The dataset contains all original columns plus:
- `markdown`: The extracted text in markdown format
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Usage

```python
from datasets import load_dataset
import json

# Load the dataset
dataset = load_dataset("{{output_dataset_id}}", split="{split}")

# Access the markdown text
for example in dataset:
    print(example["markdown"])
    break

# View all OCR models applied to this dataset
inference_info = json.loads(dataset[0]["inference_info"])
for info in inference_info:
    print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
```

## Reproduction

This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) HunyuanOCR script:

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/hunyuan-ocr.py \\
    {source_dataset} \\
    <output-dataset> \\
    --image-column {image_column} \\
    --batch-size {batch_size} \\
    --prompt-mode {prompt_mode} \\
    --max-model-len {max_model_len} \\
    --max-tokens {max_tokens} \\
    --gpu-memory-utilization {gpu_memory_utilization}
```

Generated with [UV Scripts](https://huggingface.co/uv-scripts)
"""
347
+
348
+
349
def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    batch_size: int = 1,  # Default to 1 due to vLLM V1 batching issues with HunyuanOCR
    model: str = "tencent/HunyuanOCR",
    max_model_len: int = 16384,
    max_tokens: int = 16384,
    gpu_memory_utilization: float = 0.8,
    hf_token: str = None,
    split: str = "train",
    max_samples: int = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
    prompt_mode: str = "parse-document",
    target_language: str = None,
    key: str = None,
    fields: List[str] = None,
    use_chinese: bool = False,
    custom_prompt: str = None,
    output_column: str = "markdown",
    clean_output: bool = True,
    config: str = None,
    create_pr: bool = False,
    verbose: bool = False,
):
    """Run HunyuanOCR over every image in a Hub dataset and push the results.

    Pipeline: load ``input_dataset[split]``, build one chat prompt per image
    from ``image_column``, generate with vLLM, append the text as
    ``output_column``, record provenance in an ``inference_info`` JSON
    column, then push the enriched dataset (and, unless ``create_pr`` is
    set, a dataset card) to ``output_dataset``.

    Exits the process via sys.exit(1) when no CUDA GPU is present or when
    all upload attempts fail.
    """

    # Check CUDA availability first
    check_cuda_availability()

    # Track processing start time
    start_time = datetime.now()

    # Login to HF if token provided
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Determine prompt to use: an explicit custom prompt wins over the mode.
    if custom_prompt:
        prompt = custom_prompt
        logger.info(f"Using custom prompt: {prompt[:50]}...")
    else:
        prompt = get_prompt(
            prompt_mode=prompt_mode,
            use_chinese=use_chinese,
            target_language=target_language,
            key=key,
            fields=fields,
        )
        lang_str = "Chinese" if use_chinese else "English"
        logger.info(f"Using prompt mode: {prompt_mode} ({lang_str})")

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    # Validate image column before spending time on model init.
    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Shuffle if requested (before sampling, so --max-samples draws randomly)
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    # Limit samples if requested
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Initialize vLLM model
    logger.info(f"Initializing vLLM with model: {model}")
    logger.info("This may take a few minutes on first run...")

    # Note: HunyuanOCR has batching issues with vLLM V1 engine when batch_size > 1
    # Using disable_mm_preprocessor_cache and limit_mm_per_prompt for stability
    llm = LLM(
        model=model,
        trust_remote_code=True,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
        limit_mm_per_prompt={"image": 1},
        disable_mm_preprocessor_cache=True,
        enable_prefix_caching=False,
    )

    sampling_params = SamplingParams(
        temperature=0.0,  # Deterministic for OCR
        max_tokens=max_tokens,
    )

    logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
    logger.info(f"Output will be written to column: {output_column}")

    # Process images in batches; results accumulate in dataset row order.
    all_outputs = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="HunyuanOCR processing",
    ):
        batch_indices = list(batch_indices)
        batch_images = [dataset[i][image_column] for i in batch_indices]

        try:
            # Create messages for batch
            batch_messages = [make_ocr_message(img, prompt) for img in batch_images]

            # Process with vLLM
            outputs = llm.chat(batch_messages, sampling_params)

            # Extract outputs
            for output in outputs:
                text = output.outputs[0].text.strip()
                # Clean repeated substrings if enabled
                if clean_output:
                    text = clean_repeated_substrings(text)
                all_outputs.append(text)

        except Exception as e:
            logger.error(f"Error processing batch: {e}")
            # Add error placeholders for failed batch so row alignment holds.
            all_outputs.extend(["[OCR ERROR]"] * len(batch_images))

    # Calculate processing time
    processing_duration = datetime.now() - start_time
    processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"

    # Add output column to dataset
    logger.info(f"Adding '{output_column}' column to dataset")
    dataset = dataset.add_column(output_column, all_outputs)

    # Handle inference_info tracking (for multi-model comparisons)
    inference_entry = {
        "model_id": model,
        "model_name": "HunyuanOCR",
        "column_name": output_column,
        "timestamp": datetime.now().isoformat(),
        "prompt_mode": prompt_mode if not custom_prompt else "custom",
        "prompt_language": "cn" if use_chinese else "en",
    }

    if "inference_info" in dataset.column_names:
        # Append to existing inference info
        logger.info("Updating existing inference_info column")

        def update_inference_info(example):
            # Tolerate null/invalid existing JSON by starting a fresh list.
            try:
                existing_info = (
                    json.loads(example["inference_info"])
                    if example["inference_info"]
                    else []
                )
            except (json.JSONDecodeError, TypeError):
                existing_info = []

            existing_info.append(inference_entry)
            return {"inference_info": json.dumps(existing_info)}

        dataset = dataset.map(update_inference_info)
    else:
        # Create new inference_info column
        logger.info("Creating new inference_info column")
        inference_list = [json.dumps([inference_entry])] * len(dataset)
        dataset = dataset.add_column("inference_info", inference_list)

    # Push to hub with retry and XET fallback
    logger.info(f"Pushing to {output_dataset}")
    commit_msg = f"Add HunyuanOCR OCR results ({len(dataset)} samples)" + (
        f" [{config}]" if config else ""
    )
    max_retries = 3
    for attempt in range(1, max_retries + 1):
        try:
            if attempt > 1:
                # NOTE(review): assumes huggingface_hub re-reads
                # HF_HUB_DISABLE_XET at upload time, not only at import —
                # confirm against the installed hub version.
                logger.warning("Disabling XET (fallback to HTTP upload)")
                os.environ["HF_HUB_DISABLE_XET"] = "1"
            dataset.push_to_hub(
                output_dataset,
                private=private,
                token=HF_TOKEN,
                max_shard_size="500MB",
                # Only pass config_name when set: config_name=None produces
                # invalid "config_name: null" YAML that Hub validation rejects.
                **({"config_name": config} if config else {}),
                create_pr=create_pr,
                commit_message=commit_msg,
            )
            break
        except Exception as e:
            logger.error(f"Upload attempt {attempt}/{max_retries} failed: {e}")
            if attempt < max_retries:
                delay = 30 * (2 ** (attempt - 1))  # exponential backoff: 30s, 60s
                logger.info(f"Retrying in {delay}s...")
                time.sleep(delay)
            else:
                logger.error("All upload attempts failed. OCR results are lost.")
                sys.exit(1)

    # Create and push dataset card (skip when creating PR to avoid conflicts)
    if not create_pr:
        logger.info("Creating dataset card")
        card_content = create_dataset_card(
            source_dataset=input_dataset,
            model=model,
            num_samples=len(dataset),
            processing_time=processing_time_str,
            batch_size=batch_size,
            max_model_len=max_model_len,
            max_tokens=max_tokens,
            gpu_memory_utilization=gpu_memory_utilization,
            image_column=image_column,
            split=split,
            prompt_mode=prompt_mode if not custom_prompt else "custom",
            use_chinese=use_chinese,
        )

        card = DatasetCard(card_content)
        card.push_to_hub(output_dataset, token=HF_TOKEN)

    logger.info("HunyuanOCR processing complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )
    logger.info(f"Processing time: {processing_time_str}")

    if verbose:
        # Report resolved package versions to help pin dependencies later.
        import importlib.metadata

        logger.info("--- Resolved package versions ---")
        for pkg in [
            "vllm",
            "transformers",
            "torch",
            "datasets",
            "pyarrow",
            "pillow",
        ]:
            try:
                logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
            except importlib.metadata.PackageNotFoundError:
                logger.info(f" {pkg}: not installed")
        logger.info("--- End versions ---")
596
+
597
+
598
+ if __name__ == "__main__":
599
+ # Show example usage if no arguments
600
+ if len(sys.argv) == 1:
601
+ print("=" * 80)
602
+ print("HunyuanOCR Document Processing")
603
+ print("=" * 80)
604
+ print("\nLightweight 1B VLM from Tencent for multilingual document parsing")
605
+ print("\nFeatures:")
606
+ print("- 📝 Full document parsing to markdown")
607
+ print("- 📊 Table extraction (HTML format)")
608
+ print("- 📐 Formula recognition (LaTeX format)")
609
+ print("- 📍 Text spotting with coordinates")
610
+ print("- 🔍 Information extraction (key-value, fields)")
611
+ print("- 🌐 Photo translation")
612
+ print("\nExample usage:")
613
+ print("\n1. Basic document parsing:")
614
+ print(" uv run hunyuan-ocr.py input-dataset output-dataset")
615
+ print("\n2. Formula extraction:")
616
+ print(" uv run hunyuan-ocr.py math-docs formulas --prompt-mode parse-formula")
617
+ print("\n3. Table extraction:")
618
+ print(" uv run hunyuan-ocr.py docs tables --prompt-mode parse-table")
619
+ print("\n4. Text spotting with coordinates:")
620
+ print(" uv run hunyuan-ocr.py images spotted --prompt-mode spot")
621
+ print("\n5. Extract specific field:")
622
+ print(
623
+ ' uv run hunyuan-ocr.py invoices data --prompt-mode extract-key --key "Total Amount"'
624
+ )
625
+ print("\n6. Extract multiple fields as JSON:")
626
+ print(
627
+ ' uv run hunyuan-ocr.py forms data --prompt-mode extract-fields --fields "name,date,amount"'
628
+ )
629
+ print("\n7. Translate document to English:")
630
+ print(
631
+ " uv run hunyuan-ocr.py cn-docs en-docs --prompt-mode translate --target-language English"
632
+ )
633
+ print("\n8. Use Chinese prompts:")
634
+ print(" uv run hunyuan-ocr.py docs output --use-chinese-prompts")
635
+ print("\n9. Running on HF Jobs:")
636
+ print(" hf jobs uv run --flavor l4x1 \\")
637
+ print(
638
+ ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
639
+ )
640
+ print(
641
+ " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/hunyuan-ocr.py \\"
642
+ )
643
+ print(" input-dataset output-dataset")
644
+ print("\n" + "=" * 80)
645
+ print("\nFor full help, run: uv run hunyuan-ocr.py --help")
646
+ sys.exit(0)
647
+
648
+ parser = argparse.ArgumentParser(
649
+ description="Document OCR using HunyuanOCR (1B lightweight VLM)",
650
+ formatter_class=argparse.RawDescriptionHelpFormatter,
651
+ epilog="""
652
+ Prompt Modes (official HunyuanOCR prompts):
653
+ parse-document - Full document parsing to markdown (default)
654
+ parse-formula - LaTeX formula extraction
655
+ parse-table - HTML table extraction
656
+ parse-chart - Chart/flowchart parsing (Mermaid/Markdown)
657
+ spot - Text detection with coordinates
658
+ extract-key - Extract specific key value (requires --key)
659
+ extract-fields - Extract multiple fields as JSON (requires --fields)
660
+ extract-subtitles - Subtitle extraction
661
+ translate - Document translation (requires --target-language)
662
+
663
+ Examples:
664
+ # Basic document OCR (default)
665
+ uv run hunyuan-ocr.py my-docs analyzed-docs
666
+
667
+ # Extract formulas as LaTeX
668
+ uv run hunyuan-ocr.py math-papers formulas --prompt-mode parse-formula
669
+
670
+ # Extract tables as HTML
671
+ uv run hunyuan-ocr.py reports tables --prompt-mode parse-table
672
+
673
+ # Text spotting with coordinates
674
+ uv run hunyuan-ocr.py images spotted --prompt-mode spot
675
+
676
+ # Extract specific key from forms
677
+ uv run hunyuan-ocr.py invoices amounts --prompt-mode extract-key --key "Total"
678
+
679
+ # Extract multiple fields as JSON
680
+ uv run hunyuan-ocr.py forms data --prompt-mode extract-fields --fields "name,date,amount"
681
+
682
+ # Translate Chinese documents to English
683
+ uv run hunyuan-ocr.py cn-docs translated --prompt-mode translate --target-language English
684
+
685
+ # Use Chinese prompts
686
+ uv run hunyuan-ocr.py docs output --use-chinese-prompts
687
+
688
+ # Random sampling for testing
689
+ uv run hunyuan-ocr.py large-dataset test --max-samples 50 --shuffle
690
+ """,
691
+ )
692
+
693
+ parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
694
+ parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
695
+ parser.add_argument(
696
+ "--image-column",
697
+ default="image",
698
+ help="Column containing images (default: image)",
699
+ )
700
+ parser.add_argument(
701
+ "--batch-size",
702
+ type=int,
703
+ default=1,
704
+ help="Batch size for processing (default: 1, higher values may cause vLLM errors)",
705
+ )
706
+ parser.add_argument(
707
+ "--model",
708
+ default="tencent/HunyuanOCR",
709
+ help="Model to use (default: tencent/HunyuanOCR)",
710
+ )
711
+ parser.add_argument(
712
+ "--max-model-len",
713
+ type=int,
714
+ default=16384,
715
+ help="Maximum model context length (default: 16384)",
716
+ )
717
+ parser.add_argument(
718
+ "--max-tokens",
719
+ type=int,
720
+ default=16384,
721
+ help="Maximum tokens to generate (default: 16384)",
722
+ )
723
+ parser.add_argument(
724
+ "--gpu-memory-utilization",
725
+ type=float,
726
+ default=0.8,
727
+ help="GPU memory utilization (default: 0.8)",
728
+ )
729
+ parser.add_argument("--hf-token", help="Hugging Face API token")
730
+ parser.add_argument(
731
+ "--split", default="train", help="Dataset split to use (default: train)"
732
+ )
733
+ parser.add_argument(
734
+ "--max-samples",
735
+ type=int,
736
+ help="Maximum number of samples to process (for testing)",
737
+ )
738
+ parser.add_argument(
739
+ "--private", action="store_true", help="Make output dataset private"
740
+ )
741
+ parser.add_argument(
742
+ "--shuffle", action="store_true", help="Shuffle dataset before processing"
743
+ )
744
+ parser.add_argument(
745
+ "--seed",
746
+ type=int,
747
+ default=42,
748
+ help="Random seed for shuffling (default: 42)",
749
+ )
750
+ parser.add_argument(
751
+ "--prompt-mode",
752
+ choices=[
753
+ "parse-document",
754
+ "parse-formula",
755
+ "parse-table",
756
+ "parse-chart",
757
+ "spot",
758
+ "extract-key",
759
+ "extract-fields",
760
+ "extract-subtitles",
761
+ "translate",
762
+ ],
763
+ default="parse-document",
764
+ help="Prompt template to use (default: parse-document)",
765
+ )
766
+ parser.add_argument(
767
+ "--target-language",
768
+ help="Target language for translation mode (e.g., 'English', 'Chinese')",
769
+ )
770
+ parser.add_argument(
771
+ "--key",
772
+ help="Key to extract for extract-key mode",
773
+ )
774
+ parser.add_argument(
775
+ "--fields",
776
+ help="Comma-separated list of fields for extract-fields mode",
777
+ )
778
+ parser.add_argument(
779
+ "--use-chinese-prompts",
780
+ action="store_true",
781
+ help="Use Chinese versions of prompts",
782
+ )
783
+ parser.add_argument(
784
+ "--custom-prompt",
785
+ help="Custom prompt text (overrides --prompt-mode)",
786
+ )
787
+ parser.add_argument(
788
+ "--output-column",
789
+ default="markdown",
790
+ help="Column name for output text (default: markdown)",
791
+ )
792
+ parser.add_argument(
793
+ "--no-clean-output",
794
+ action="store_true",
795
+ help="Disable cleaning of repeated substrings in output",
796
+ )
797
+ parser.add_argument(
798
+ "--config",
799
+ help="Dataset config name for multi-model benchmarks",
800
+ )
801
+ parser.add_argument(
802
+ "--create-pr",
803
+ action="store_true",
804
+ help="Push results as a pull request instead of direct commit",
805
+ )
806
+ parser.add_argument(
807
+ "--verbose",
808
+ action="store_true",
809
+ help="Log resolved package versions at the end of the run",
810
+ )
811
+
812
+ args = parser.parse_args()
813
+
814
+ # Parse fields if provided
815
+ fields_list = None
816
+ if args.fields:
817
+ fields_list = [f.strip() for f in args.fields.split(",")]
818
+
819
+ main(
820
+ input_dataset=args.input_dataset,
821
+ output_dataset=args.output_dataset,
822
+ image_column=args.image_column,
823
+ batch_size=args.batch_size,
824
+ model=args.model,
825
+ max_model_len=args.max_model_len,
826
+ max_tokens=args.max_tokens,
827
+ gpu_memory_utilization=args.gpu_memory_utilization,
828
+ hf_token=args.hf_token,
829
+ split=args.split,
830
+ max_samples=args.max_samples,
831
+ private=args.private,
832
+ shuffle=args.shuffle,
833
+ seed=args.seed,
834
+ prompt_mode=args.prompt_mode,
835
+ target_language=args.target_language,
836
+ key=args.key,
837
+ fields=fields_list,
838
+ use_chinese=args.use_chinese_prompts,
839
+ custom_prompt=args.custom_prompt,
840
+ output_column=args.output_column,
841
+ clean_output=not args.no_clean_output,
842
+ config=args.config,
843
+ create_pr=args.create_pr,
844
+ verbose=args.verbose,
845
+ )
lighton-ocr.py CHANGED
@@ -2,7 +2,7 @@
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
  # "datasets",
5
- # "huggingface-hub[hf_transfer]",
6
  # "pillow",
7
  # "vllm",
8
  # "tqdm",
@@ -12,7 +12,7 @@
12
  # ]
13
  #
14
  # [[tool.uv.index]]
15
- # url = "https://wheels.vllm.ai/nightly"
16
  #
17
  # [tool.uv]
18
  # prerelease = "allow"
@@ -300,9 +300,6 @@ def main(
300
  # Track processing start time
301
  start_time = datetime.now()
302
 
303
- # Enable HF_TRANSFER for faster downloads
304
- os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
305
-
306
  # Login to HF if token provided
307
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
308
  if HF_TOKEN:
@@ -337,7 +334,7 @@ def main(
337
  logger.info(f"Limited to {len(dataset)} samples")
338
 
339
  # Initialize vLLM model
340
- logger.info(f"Initializing vLLM with LightOnOCR")
341
  logger.info("This may take a few minutes on first run...")
342
  llm = LLM(
343
  model=model,
@@ -418,7 +415,11 @@ def main(
418
 
419
  def update_inference_info(example):
420
  try:
421
- existing_info = json.loads(example["inference_info"]) if example["inference_info"] else []
 
 
 
 
422
  except (json.JSONDecodeError, TypeError):
423
  existing_info = []
424
 
@@ -459,9 +460,13 @@ def main(
459
  card.push_to_hub(output_dataset, token=HF_TOKEN)
460
 
461
  logger.info("✅ LightOnOCR processing complete!")
462
- logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset}")
 
 
463
  logger.info(f"Processing time: {processing_time_str}")
464
- logger.info(f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec")
 
 
465
 
466
 
467
  if __name__ == "__main__":
@@ -491,9 +496,12 @@ if __name__ == "__main__":
491
  print(" uv run lighton-ocr.py docs output --no-resize")
492
  print("\n6. Running on HF Jobs:")
493
  print(" hf jobs uv run --flavor l4x1 \\")
494
- print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
495
- print(" -e HF_HUB_ENABLE_HF_TRANSFER=1 \\")
496
- print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/lighton-ocr.py \\")
 
 
 
497
  print(" input-dataset output-dataset --vocab-size 32k")
498
  print("\n" + "=" * 80)
499
  print("\nVocabulary Size Options:")
 
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
  # "datasets",
5
+ # "huggingface-hub",
6
  # "pillow",
7
  # "vllm",
8
  # "tqdm",
 
12
  # ]
13
  #
14
  # [[tool.uv.index]]
15
+ # url = "https://wheels.vllm.ai/nightly/cu129"
16
  #
17
  # [tool.uv]
18
  # prerelease = "allow"
 
300
  # Track processing start time
301
  start_time = datetime.now()
302
 
 
 
 
303
  # Login to HF if token provided
304
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
305
  if HF_TOKEN:
 
334
  logger.info(f"Limited to {len(dataset)} samples")
335
 
336
  # Initialize vLLM model
337
+ logger.info("Initializing vLLM with LightOnOCR")
338
  logger.info("This may take a few minutes on first run...")
339
  llm = LLM(
340
  model=model,
 
415
 
416
  def update_inference_info(example):
417
  try:
418
+ existing_info = (
419
+ json.loads(example["inference_info"])
420
+ if example["inference_info"]
421
+ else []
422
+ )
423
  except (json.JSONDecodeError, TypeError):
424
  existing_info = []
425
 
 
460
  card.push_to_hub(output_dataset, token=HF_TOKEN)
461
 
462
  logger.info("✅ LightOnOCR processing complete!")
463
+ logger.info(
464
+ f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
465
+ )
466
  logger.info(f"Processing time: {processing_time_str}")
467
+ logger.info(
468
+ f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec"
469
+ )
470
 
471
 
472
  if __name__ == "__main__":
 
496
  print(" uv run lighton-ocr.py docs output --no-resize")
497
  print("\n6. Running on HF Jobs:")
498
  print(" hf jobs uv run --flavor l4x1 \\")
499
+ print(
500
+ ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
501
+ )
502
+ print(
503
+ " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/lighton-ocr.py \\"
504
+ )
505
  print(" input-dataset output-dataset --vocab-size 32k")
506
  print("\n" + "=" * 80)
507
  print("\nVocabulary Size Options:")
lighton-ocr2.py CHANGED
@@ -12,7 +12,7 @@
12
  # ]
13
  #
14
  # [[tool.uv.index]]
15
- # url = "https://wheels.vllm.ai/nightly"
16
  #
17
  # [tool.uv]
18
  # prerelease = "allow"
@@ -291,6 +291,7 @@ def main(
291
  output_column: str = "markdown",
292
  config: str = None,
293
  create_pr: bool = False,
 
294
  ):
295
  """Process images from HF dataset through LightOnOCR-2 model."""
296
 
@@ -408,7 +409,11 @@ def main(
408
 
409
  def update_inference_info(example):
410
  try:
411
- existing_info = json.loads(example["inference_info"]) if example["inference_info"] else []
 
 
 
 
412
  except (json.JSONDecodeError, TypeError):
413
  existing_info = []
414
 
@@ -428,7 +433,7 @@ def main(
428
  output_dataset,
429
  private=private,
430
  token=HF_TOKEN,
431
- config_name=config,
432
  create_pr=create_pr,
433
  commit_message=f"Add {MODEL} OCR results ({len(dataset)} samples)"
434
  + (f" [{config}]" if config else ""),
@@ -456,9 +461,24 @@ def main(
456
  card.push_to_hub(output_dataset, token=HF_TOKEN)
457
 
458
  logger.info("✅ LightOnOCR-2 processing complete!")
459
- logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset}")
 
 
460
  logger.info(f"Processing time: {processing_time_str}")
461
- logger.info(f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec")
 
 
 
 
 
 
 
 
 
 
 
 
 
462
 
463
 
464
  if __name__ == "__main__":
@@ -489,7 +509,9 @@ if __name__ == "__main__":
489
  print("\n5. Running on HF Jobs:")
490
  print(" hf jobs uv run --flavor l4x1 \\")
491
  print(" -s HF_TOKEN \\")
492
- print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/lighton-ocr2.py \\")
 
 
493
  print(" input-dataset output-dataset --batch-size 32")
494
  print("\n" + "=" * 80)
495
  print("\nKey Improvements over v1:")
@@ -612,6 +634,11 @@ Examples:
612
  default="markdown",
613
  help="Column name for output text (default: markdown)",
614
  )
 
 
 
 
 
615
 
616
  args = parser.parse_args()
617
 
@@ -636,4 +663,5 @@ Examples:
636
  output_column=args.output_column,
637
  config=args.config,
638
  create_pr=args.create_pr,
 
639
  )
 
12
  # ]
13
  #
14
  # [[tool.uv.index]]
15
+ # url = "https://wheels.vllm.ai/nightly/cu129"
16
  #
17
  # [tool.uv]
18
  # prerelease = "allow"
 
291
  output_column: str = "markdown",
292
  config: str = None,
293
  create_pr: bool = False,
294
+ verbose: bool = False,
295
  ):
296
  """Process images from HF dataset through LightOnOCR-2 model."""
297
 
 
409
 
410
  def update_inference_info(example):
411
  try:
412
+ existing_info = (
413
+ json.loads(example["inference_info"])
414
+ if example["inference_info"]
415
+ else []
416
+ )
417
  except (json.JSONDecodeError, TypeError):
418
  existing_info = []
419
 
 
433
  output_dataset,
434
  private=private,
435
  token=HF_TOKEN,
436
+ **({"config_name": config} if config else {}),
437
  create_pr=create_pr,
438
  commit_message=f"Add {MODEL} OCR results ({len(dataset)} samples)"
439
  + (f" [{config}]" if config else ""),
 
461
  card.push_to_hub(output_dataset, token=HF_TOKEN)
462
 
463
  logger.info("✅ LightOnOCR-2 processing complete!")
464
+ logger.info(
465
+ f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
466
+ )
467
  logger.info(f"Processing time: {processing_time_str}")
468
+ logger.info(
469
+ f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec"
470
+ )
471
+
472
+ if verbose:
473
+ import importlib.metadata
474
+
475
+ logger.info("--- Resolved package versions ---")
476
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
477
+ try:
478
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
479
+ except importlib.metadata.PackageNotFoundError:
480
+ logger.info(f" {pkg}: not installed")
481
+ logger.info("--- End versions ---")
482
 
483
 
484
  if __name__ == "__main__":
 
509
  print("\n5. Running on HF Jobs:")
510
  print(" hf jobs uv run --flavor l4x1 \\")
511
  print(" -s HF_TOKEN \\")
512
+ print(
513
+ " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/lighton-ocr2.py \\"
514
+ )
515
  print(" input-dataset output-dataset --batch-size 32")
516
  print("\n" + "=" * 80)
517
  print("\nKey Improvements over v1:")
 
634
  default="markdown",
635
  help="Column name for output text (default: markdown)",
636
  )
637
+ parser.add_argument(
638
+ "--verbose",
639
+ action="store_true",
640
+ help="Log resolved package versions after processing (useful for pinning deps)",
641
+ )
642
 
643
  args = parser.parse_args()
644
 
 
663
  output_column=args.output_column,
664
  config=args.config,
665
  create_pr=args.create_pr,
666
+ verbose=args.verbose,
667
  )
numarkdown-ocr.py CHANGED
@@ -2,7 +2,7 @@
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
  # "datasets",
5
- # "huggingface-hub[hf_transfer]",
6
  # "pillow",
7
  # "vllm",
8
  # "tqdm",
@@ -37,13 +37,13 @@ import logging
37
  import os
38
  import re
39
  import sys
40
- from typing import Any, Dict, List, Union, Optional, Tuple
41
  from datetime import datetime
 
42
 
43
- import torch
44
  from torch import cuda
45
  from datasets import load_dataset
46
- from huggingface_hub import DatasetCard, HfApi, login
47
  from PIL import Image
48
  from toolz import partition_all
49
  from tqdm.auto import tqdm
@@ -57,15 +57,17 @@ def check_gpu_availability() -> int:
57
  """Check if CUDA is available and return the number of GPUs."""
58
  if not cuda.is_available():
59
  logger.error("CUDA is not available. This script requires a GPU.")
60
- logger.error("Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor.")
 
 
61
  sys.exit(1)
62
-
63
  num_gpus = cuda.device_count()
64
  for i in range(num_gpus):
65
  gpu_name = cuda.get_device_name(i)
66
  gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
67
  logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")
68
-
69
  return num_gpus
70
 
71
 
@@ -77,27 +79,29 @@ def validate_and_resize_image(
77
  """Validate and resize image to meet pixel constraints if necessary."""
78
  width, height = image.size
79
  total_pixels = width * height
80
-
81
  if total_pixels < min_pixels or total_pixels > max_pixels:
82
  # Calculate scaling factor
83
  if total_pixels < min_pixels:
84
  scale = (min_pixels / total_pixels) ** 0.5
85
  else:
86
  scale = (max_pixels / total_pixels) ** 0.5
87
-
88
  new_width = int(width * scale)
89
  new_height = int(height * scale)
90
-
91
- logger.debug(f"Resizing image from {width}x{height} to {new_width}x{new_height}")
 
 
92
  image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
93
-
94
  return image
95
 
96
 
97
  def extract_answer_from_thinking(text: str, include_thinking: bool = False) -> str:
98
  """
99
  Extract the final answer from NuMarkdown's thinking output.
100
-
101
  The model generates output in format:
102
  <think>reasoning process...</think>
103
  <answer>final markdown output</answer>
@@ -105,27 +109,27 @@ def extract_answer_from_thinking(text: str, include_thinking: bool = False) -> s
105
  if include_thinking:
106
  # Return the full output including thinking traces
107
  return text.strip()
108
-
109
  # Extract content between <answer> tags
110
- answer_pattern = r'<answer>(.*?)</answer>'
111
  answer_match = re.search(answer_pattern, text, re.DOTALL)
112
-
113
  if answer_match:
114
  return answer_match.group(1).strip()
115
-
116
  # If no answer tags found, check if the entire text is markdown
117
  # (sometimes the model might not use tags)
118
- if not '<think>' in text and not '<answer>' in text:
119
  return text.strip()
120
-
121
  # Fallback: return everything after </think> if present
122
- think_end = text.find('</think>')
123
  if think_end != -1:
124
- remaining = text[think_end + 8:].strip()
125
  # Remove <answer> tags if present
126
- remaining = remaining.replace('<answer>', '').replace('</answer>', '').strip()
127
  return remaining
128
-
129
  # Last resort: return the full text
130
  logger.warning("Could not extract answer from thinking tokens, returning full text")
131
  return text.strip()
@@ -145,15 +149,15 @@ def make_numarkdown_message(
145
  pil_img = Image.open(image).convert("RGB")
146
  else:
147
  raise ValueError(f"Unsupported image type: {type(image)}")
148
-
149
  # Validate and resize if necessary
150
  pil_img = validate_and_resize_image(pil_img)
151
-
152
  # Convert to base64 data URI
153
  buf = io.BytesIO()
154
  pil_img.save(buf, format="PNG")
155
  data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
156
-
157
  # Return message in vLLM chat format
158
  return [
159
  {
@@ -182,7 +186,7 @@ def create_dataset_card(
182
  ) -> str:
183
  """Create a dataset card documenting the OCR process."""
184
  model_name = model.split("/")[-1]
185
-
186
  return f"""---
187
  tags:
188
  - ocr
@@ -308,15 +312,19 @@ def main(
308
  include_thinking: bool = False,
309
  temperature: float = 0.0,
310
  custom_prompt: Optional[str] = None,
 
 
 
 
311
  ):
312
  """Process images from HF dataset through NuMarkdown model.
313
-
314
  The max_tokens parameter controls the total token budget for both
315
  thinking and answer phases. For complex documents with extensive
316
  reasoning, the default of 16384 tokens provides ample room for both
317
  the thinking process and the final markdown output.
318
  """
319
-
320
  # GPU check and configuration
321
  num_gpus = check_gpu_availability()
322
  if tensor_parallel_size is None:
@@ -330,38 +338,35 @@ def main(
330
  logger.warning(
331
  f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
332
  )
333
-
334
  # Track processing start time
335
  start_time = datetime.now()
336
-
337
- # Enable HF_TRANSFER for faster downloads
338
- os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
339
-
340
  # Login to HF if token provided
341
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
342
  if HF_TOKEN:
343
  login(token=HF_TOKEN)
344
-
345
  # Load dataset
346
  logger.info(f"Loading dataset: {input_dataset}")
347
  dataset = load_dataset(input_dataset, split=split)
348
-
349
  # Validate image column
350
  if image_column not in dataset.column_names:
351
  raise ValueError(
352
  f"Column '{image_column}' not found. Available: {dataset.column_names}"
353
  )
354
-
355
  # Shuffle if requested
356
  if shuffle:
357
  logger.info(f"Shuffling dataset with seed {seed}")
358
  dataset = dataset.shuffle(seed=seed)
359
-
360
  # Limit samples if requested
361
  if max_samples:
362
  dataset = dataset.select(range(min(max_samples, len(dataset))))
363
  logger.info(f"Limited to {len(dataset)} samples")
364
-
365
  # Initialize vLLM with trust_remote_code for NuMarkdown
366
  logger.info(f"Initializing vLLM with model: {model}")
367
  logger.info(f"Using {tensor_parallel_size} GPU(s) for inference")
@@ -373,22 +378,25 @@ def main(
373
  tensor_parallel_size=tensor_parallel_size,
374
  limit_mm_per_prompt={"image": 1},
375
  )
376
-
377
  # Set up sampling parameters
378
  sampling_params = SamplingParams(
379
  temperature=temperature,
380
  max_tokens=max_tokens,
381
  )
382
-
383
  # Use custom prompt if provided, otherwise use default
384
- prompt = custom_prompt or "Convert this document to markdown. Focus on preserving structure, tables, formulas, and all textual content."
385
-
 
 
 
386
  # Process images in batches
387
  all_markdown = []
388
-
389
  logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
390
  logger.info(f"Including thinking traces: {include_thinking}")
391
-
392
  # Process in batches to avoid memory issues
393
  for batch_indices in tqdm(
394
  partition_all(batch_size, range(len(dataset))),
@@ -397,80 +405,97 @@ def main(
397
  ):
398
  batch_indices = list(batch_indices)
399
  batch_images = [dataset[i][image_column] for i in batch_indices]
400
-
401
  try:
402
  # Create messages for batch
403
  batch_messages = [
404
  make_numarkdown_message(img, prompt) for img in batch_images
405
  ]
406
-
407
  # Process with vLLM
408
  outputs = llm.chat(batch_messages, sampling_params)
409
-
410
  # Extract markdown from outputs
411
  for output in outputs:
412
  raw_text = output.outputs[0].text.strip()
413
  # Extract answer from thinking tokens
414
  markdown_text = extract_answer_from_thinking(raw_text, include_thinking)
415
  all_markdown.append(markdown_text)
416
-
417
  except Exception as e:
418
  logger.error(f"Error processing batch: {e}")
419
  # Add error placeholders for failed batch
420
  all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
421
-
422
- # Add markdown column to dataset
423
- logger.info("Adding markdown column to dataset")
424
- dataset = dataset.add_column("markdown", all_markdown)
425
-
426
  # Handle inference_info tracking
427
- logger.info("Updating inference_info...")
428
-
429
- # Check for existing inference_info
430
- if "inference_info" in dataset.column_names:
431
- # Parse existing info from first row (all rows have same info)
432
- try:
433
- existing_info = json.loads(dataset[0]["inference_info"])
434
- if not isinstance(existing_info, list):
435
- existing_info = [existing_info] # Convert old format to list
436
- except (json.JSONDecodeError, TypeError):
437
- existing_info = []
438
- # Remove old column to update it
439
- dataset = dataset.remove_columns(["inference_info"])
440
- else:
441
- existing_info = []
442
-
443
- # Add new inference info
444
- new_info = {
445
- "column_name": "markdown",
446
  "model_id": model,
447
- "processing_date": datetime.now().isoformat(),
448
- "batch_size": batch_size,
449
- "max_tokens": max_tokens,
450
- "gpu_memory_utilization": gpu_memory_utilization,
451
- "max_model_len": max_model_len,
452
  "include_thinking": include_thinking,
453
  "temperature": temperature,
454
- "prompt": prompt,
455
- "script": "numarkdown-ocr.py",
456
- "script_version": "1.0.0",
457
- "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/numarkdown-ocr.py"
458
  }
459
- existing_info.append(new_info)
460
-
461
- # Add updated inference_info column
462
- info_json = json.dumps(existing_info, ensure_ascii=False)
463
- dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
464
-
465
- # Push to hub
466
- logger.info(f"Pushing to {output_dataset}")
467
- dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
468
-
 
 
 
 
 
 
 
 
 
 
 
 
469
  # Calculate processing time
470
- end_time = datetime.now()
471
- processing_duration = end_time - start_time
472
  processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
473
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
474
  # Create and push dataset card
475
  logger.info("Creating dataset card...")
476
  card_content = create_dataset_card(
@@ -487,28 +512,26 @@ def main(
487
  image_column=image_column,
488
  split=split,
489
  )
490
-
491
- # Handle dataset card push with proper repo_id
492
- full_repo_id = output_dataset
493
- try:
494
- card = DatasetCard(card_content)
495
- # If output_dataset doesn't contain a username, get the current user's name
496
- if "/" not in output_dataset:
497
- api = HfApi(token=HF_TOKEN)
498
- user_info = api.whoami()
499
- full_repo_id = f"{user_info['name']}/{output_dataset}"
500
- logger.info(f"Using full repo ID: {full_repo_id}")
501
-
502
- card.push_to_hub(full_repo_id, token=HF_TOKEN)
503
- logger.info("✅ Dataset card created and pushed!")
504
- except Exception as e:
505
- logger.warning(f"Could not push dataset card: {e}")
506
- logger.info("Dataset was successfully created but card upload failed. You can add it manually.")
507
-
508
- logger.info("✅ OCR conversion complete!")
509
  logger.info(
510
- f"Dataset available at: https://huggingface.co/datasets/{full_repo_id}"
511
  )
 
 
 
 
 
 
 
 
 
 
 
 
512
 
513
 
514
  if __name__ == "__main__":
@@ -530,7 +553,9 @@ if __name__ == "__main__":
530
  print("\n1. Basic OCR conversion:")
531
  print(" uv run numarkdown-ocr.py document-images markdown-docs")
532
  print("\n2. Include thinking traces:")
533
- print(" uv run numarkdown-ocr.py complex-docs analyzed-docs --include-thinking")
 
 
534
  print("\n3. With custom settings:")
535
  print(" uv run numarkdown-ocr.py scientific-papers extracted-text \\")
536
  print(" --batch-size 8 \\")
@@ -540,19 +565,27 @@ if __name__ == "__main__":
540
  print(" uv run numarkdown-ocr.py large-dataset test-output --max-samples 10")
541
  print("\n5. Custom prompt for specific needs:")
542
  print(" uv run numarkdown-ocr.py invoices invoice-data \\")
543
- print(' --custom-prompt "Extract all invoice details including line items"')
 
 
544
  print("\n6. Multi-GPU processing:")
545
- print(" uv run numarkdown-ocr.py large-docs processed-docs --tensor-parallel-size 2")
 
 
546
  print("\n7. Running on HF Jobs:")
547
  print(" hf jobs uv run --flavor a100x2 \\")
548
- print(' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\')
549
- print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/numarkdown-ocr.py \\")
 
 
 
 
550
  print(" your-document-dataset \\")
551
  print(" your-markdown-output")
552
  print("\n" + "=" * 80)
553
  print("\nFor full help, run: uv run numarkdown-ocr.py --help")
554
  sys.exit(0)
555
-
556
  parser = argparse.ArgumentParser(
557
  description="OCR images to markdown using NuMarkdown-8B-Thinking with reasoning",
558
  formatter_class=argparse.RawDescriptionHelpFormatter,
@@ -577,7 +610,7 @@ Examples:
577
  uv run numarkdown-ocr.py ordered-dataset random-sample --max-samples 50 --shuffle
578
  """,
579
  )
580
-
581
  parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
582
  parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
583
  parser.add_argument(
@@ -658,9 +691,28 @@ Examples:
658
  type=str,
659
  help="Custom prompt for the model (overrides default)",
660
  )
661
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
662
  args = parser.parse_args()
663
-
664
  main(
665
  input_dataset=args.input_dataset,
666
  output_dataset=args.output_dataset,
@@ -680,4 +732,8 @@ Examples:
680
  include_thinking=args.include_thinking,
681
  temperature=args.temperature,
682
  custom_prompt=args.custom_prompt,
683
- )
 
 
 
 
 
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
  # "datasets",
5
+ # "huggingface-hub",
6
  # "pillow",
7
  # "vllm",
8
  # "tqdm",
 
37
  import os
38
  import re
39
  import sys
40
+ import time
41
  from datetime import datetime
42
+ from typing import Any, Dict, List, Optional, Union
43
 
 
44
  from torch import cuda
45
  from datasets import load_dataset
46
+ from huggingface_hub import DatasetCard, login
47
  from PIL import Image
48
  from toolz import partition_all
49
  from tqdm.auto import tqdm
 
57
  """Check if CUDA is available and return the number of GPUs."""
58
  if not cuda.is_available():
59
  logger.error("CUDA is not available. This script requires a GPU.")
60
+ logger.error(
61
+ "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
62
+ )
63
  sys.exit(1)
64
+
65
  num_gpus = cuda.device_count()
66
  for i in range(num_gpus):
67
  gpu_name = cuda.get_device_name(i)
68
  gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
69
  logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")
70
+
71
  return num_gpus
72
 
73
 
 
79
  """Validate and resize image to meet pixel constraints if necessary."""
80
  width, height = image.size
81
  total_pixels = width * height
82
+
83
  if total_pixels < min_pixels or total_pixels > max_pixels:
84
  # Calculate scaling factor
85
  if total_pixels < min_pixels:
86
  scale = (min_pixels / total_pixels) ** 0.5
87
  else:
88
  scale = (max_pixels / total_pixels) ** 0.5
89
+
90
  new_width = int(width * scale)
91
  new_height = int(height * scale)
92
+
93
+ logger.debug(
94
+ f"Resizing image from {width}x{height} to {new_width}x{new_height}"
95
+ )
96
  image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
97
+
98
  return image
99
 
100
 
101
  def extract_answer_from_thinking(text: str, include_thinking: bool = False) -> str:
102
  """
103
  Extract the final answer from NuMarkdown's thinking output.
104
+
105
  The model generates output in format:
106
  <think>reasoning process...</think>
107
  <answer>final markdown output</answer>
 
109
  if include_thinking:
110
  # Return the full output including thinking traces
111
  return text.strip()
112
+
113
  # Extract content between <answer> tags
114
+ answer_pattern = r"<answer>(.*?)</answer>"
115
  answer_match = re.search(answer_pattern, text, re.DOTALL)
116
+
117
  if answer_match:
118
  return answer_match.group(1).strip()
119
+
120
  # If no answer tags found, check if the entire text is markdown
121
  # (sometimes the model might not use tags)
122
+ if "<think>" not in text and "<answer>" not in text:
123
  return text.strip()
124
+
125
  # Fallback: return everything after </think> if present
126
+ think_end = text.find("</think>")
127
  if think_end != -1:
128
+ remaining = text[think_end + 8 :].strip()
129
  # Remove <answer> tags if present
130
+ remaining = remaining.replace("<answer>", "").replace("</answer>", "").strip()
131
  return remaining
132
+
133
  # Last resort: return the full text
134
  logger.warning("Could not extract answer from thinking tokens, returning full text")
135
  return text.strip()
 
149
  pil_img = Image.open(image).convert("RGB")
150
  else:
151
  raise ValueError(f"Unsupported image type: {type(image)}")
152
+
153
  # Validate and resize if necessary
154
  pil_img = validate_and_resize_image(pil_img)
155
+
156
  # Convert to base64 data URI
157
  buf = io.BytesIO()
158
  pil_img.save(buf, format="PNG")
159
  data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
160
+
161
  # Return message in vLLM chat format
162
  return [
163
  {
 
186
  ) -> str:
187
  """Create a dataset card documenting the OCR process."""
188
  model_name = model.split("/")[-1]
189
+
190
  return f"""---
191
  tags:
192
  - ocr
 
312
  include_thinking: bool = False,
313
  temperature: float = 0.0,
314
  custom_prompt: Optional[str] = None,
315
+ output_column: str = "markdown",
316
+ config: str = None,
317
+ create_pr: bool = False,
318
+ verbose: bool = False,
319
  ):
320
  """Process images from HF dataset through NuMarkdown model.
321
+
322
  The max_tokens parameter controls the total token budget for both
323
  thinking and answer phases. For complex documents with extensive
324
  reasoning, the default of 16384 tokens provides ample room for both
325
  the thinking process and the final markdown output.
326
  """
327
+
328
  # GPU check and configuration
329
  num_gpus = check_gpu_availability()
330
  if tensor_parallel_size is None:
 
338
  logger.warning(
339
  f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
340
  )
341
+
342
  # Track processing start time
343
  start_time = datetime.now()
344
+
 
 
 
345
  # Login to HF if token provided
346
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
347
  if HF_TOKEN:
348
  login(token=HF_TOKEN)
349
+
350
  # Load dataset
351
  logger.info(f"Loading dataset: {input_dataset}")
352
  dataset = load_dataset(input_dataset, split=split)
353
+
354
  # Validate image column
355
  if image_column not in dataset.column_names:
356
  raise ValueError(
357
  f"Column '{image_column}' not found. Available: {dataset.column_names}"
358
  )
359
+
360
  # Shuffle if requested
361
  if shuffle:
362
  logger.info(f"Shuffling dataset with seed {seed}")
363
  dataset = dataset.shuffle(seed=seed)
364
+
365
  # Limit samples if requested
366
  if max_samples:
367
  dataset = dataset.select(range(min(max_samples, len(dataset))))
368
  logger.info(f"Limited to {len(dataset)} samples")
369
+
370
  # Initialize vLLM with trust_remote_code for NuMarkdown
371
  logger.info(f"Initializing vLLM with model: {model}")
372
  logger.info(f"Using {tensor_parallel_size} GPU(s) for inference")
 
378
  tensor_parallel_size=tensor_parallel_size,
379
  limit_mm_per_prompt={"image": 1},
380
  )
381
+
382
  # Set up sampling parameters
383
  sampling_params = SamplingParams(
384
  temperature=temperature,
385
  max_tokens=max_tokens,
386
  )
387
+
388
  # Use custom prompt if provided, otherwise use default
389
+ prompt = (
390
+ custom_prompt
391
+ or "Convert this document to markdown. Focus on preserving structure, tables, formulas, and all textual content."
392
+ )
393
+
394
  # Process images in batches
395
  all_markdown = []
396
+
397
  logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
398
  logger.info(f"Including thinking traces: {include_thinking}")
399
+
400
  # Process in batches to avoid memory issues
401
  for batch_indices in tqdm(
402
  partition_all(batch_size, range(len(dataset))),
 
405
  ):
406
  batch_indices = list(batch_indices)
407
  batch_images = [dataset[i][image_column] for i in batch_indices]
408
+
409
  try:
410
  # Create messages for batch
411
  batch_messages = [
412
  make_numarkdown_message(img, prompt) for img in batch_images
413
  ]
414
+
415
  # Process with vLLM
416
  outputs = llm.chat(batch_messages, sampling_params)
417
+
418
  # Extract markdown from outputs
419
  for output in outputs:
420
  raw_text = output.outputs[0].text.strip()
421
  # Extract answer from thinking tokens
422
  markdown_text = extract_answer_from_thinking(raw_text, include_thinking)
423
  all_markdown.append(markdown_text)
424
+
425
  except Exception as e:
426
  logger.error(f"Error processing batch: {e}")
427
  # Add error placeholders for failed batch
428
  all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
429
+
430
+ # Add output column to dataset
431
+ logger.info(f"Adding '{output_column}' column to dataset")
432
+ dataset = dataset.add_column(output_column, all_markdown)
433
+
434
  # Handle inference_info tracking
435
+ inference_entry = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
  "model_id": model,
437
+ "model_name": "NuMarkdown-8B-Thinking",
438
+ "column_name": output_column,
439
+ "timestamp": datetime.now().isoformat(),
 
 
440
  "include_thinking": include_thinking,
441
  "temperature": temperature,
442
+ "max_tokens": max_tokens,
 
 
 
443
  }
444
+
445
+ if "inference_info" in dataset.column_names:
446
+ logger.info("Updating existing inference_info column")
447
+
448
+ def update_inference_info(example):
449
+ try:
450
+ existing_info = (
451
+ json.loads(example["inference_info"])
452
+ if example["inference_info"]
453
+ else []
454
+ )
455
+ except (json.JSONDecodeError, TypeError):
456
+ existing_info = []
457
+ existing_info.append(inference_entry)
458
+ return {"inference_info": json.dumps(existing_info)}
459
+
460
+ dataset = dataset.map(update_inference_info)
461
+ else:
462
+ logger.info("Creating new inference_info column")
463
+ inference_list = [json.dumps([inference_entry])] * len(dataset)
464
+ dataset = dataset.add_column("inference_info", inference_list)
465
+
466
  # Calculate processing time
467
+ processing_duration = datetime.now() - start_time
 
468
  processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
469
+
470
+ # Push to hub with retry and XET fallback
471
+ logger.info(f"Pushing to {output_dataset}")
472
+ max_retries = 3
473
+ for attempt in range(1, max_retries + 1):
474
+ try:
475
+ if attempt > 1:
476
+ logger.warning("Disabling XET (fallback to HTTP upload)")
477
+ os.environ["HF_HUB_DISABLE_XET"] = "1"
478
+ dataset.push_to_hub(
479
+ output_dataset,
480
+ private=private,
481
+ token=HF_TOKEN,
482
+ max_shard_size="500MB",
483
+ **({"config_name": config} if config else {}),
484
+ create_pr=create_pr,
485
+ commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
486
+ + (f" [{config}]" if config else ""),
487
+ )
488
+ break
489
+ except Exception as e:
490
+ logger.error(f"Upload attempt {attempt}/{max_retries} failed: {e}")
491
+ if attempt < max_retries:
492
+ delay = 30 * (2 ** (attempt - 1))
493
+ logger.info(f"Retrying in {delay}s...")
494
+ time.sleep(delay)
495
+ else:
496
+ logger.error("All upload attempts failed. OCR results are lost.")
497
+ sys.exit(1)
498
+
499
  # Create and push dataset card
500
  logger.info("Creating dataset card...")
501
  card_content = create_dataset_card(
 
512
  image_column=image_column,
513
  split=split,
514
  )
515
+
516
+ card = DatasetCard(card_content)
517
+ card.push_to_hub(output_dataset, token=HF_TOKEN)
518
+
519
+ logger.info("NuMarkdown processing complete!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
520
  logger.info(
521
+ f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
522
  )
523
+ logger.info(f"Processing time: {processing_time}")
524
+
525
+ if verbose:
526
+ import importlib.metadata
527
+
528
+ logger.info("--- Resolved package versions ---")
529
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
530
+ try:
531
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
532
+ except importlib.metadata.PackageNotFoundError:
533
+ logger.info(f" {pkg}: not installed")
534
+ logger.info("--- End versions ---")
535
 
536
 
537
  if __name__ == "__main__":
 
553
  print("\n1. Basic OCR conversion:")
554
  print(" uv run numarkdown-ocr.py document-images markdown-docs")
555
  print("\n2. Include thinking traces:")
556
+ print(
557
+ " uv run numarkdown-ocr.py complex-docs analyzed-docs --include-thinking"
558
+ )
559
  print("\n3. With custom settings:")
560
  print(" uv run numarkdown-ocr.py scientific-papers extracted-text \\")
561
  print(" --batch-size 8 \\")
 
565
  print(" uv run numarkdown-ocr.py large-dataset test-output --max-samples 10")
566
  print("\n5. Custom prompt for specific needs:")
567
  print(" uv run numarkdown-ocr.py invoices invoice-data \\")
568
+ print(
569
+ ' --custom-prompt "Extract all invoice details including line items"'
570
+ )
571
  print("\n6. Multi-GPU processing:")
572
+ print(
573
+ " uv run numarkdown-ocr.py large-docs processed-docs --tensor-parallel-size 2"
574
+ )
575
  print("\n7. Running on HF Jobs:")
576
  print(" hf jobs uv run --flavor a100x2 \\")
577
+ print(
578
+ ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
579
+ )
580
+ print(
581
+ " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/numarkdown-ocr.py \\"
582
+ )
583
  print(" your-document-dataset \\")
584
  print(" your-markdown-output")
585
  print("\n" + "=" * 80)
586
  print("\nFor full help, run: uv run numarkdown-ocr.py --help")
587
  sys.exit(0)
588
+
589
  parser = argparse.ArgumentParser(
590
  description="OCR images to markdown using NuMarkdown-8B-Thinking with reasoning",
591
  formatter_class=argparse.RawDescriptionHelpFormatter,
 
610
  uv run numarkdown-ocr.py ordered-dataset random-sample --max-samples 50 --shuffle
611
  """,
612
  )
613
+
614
  parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
615
  parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
616
  parser.add_argument(
 
691
  type=str,
692
  help="Custom prompt for the model (overrides default)",
693
  )
694
+ parser.add_argument(
695
+ "--output-column",
696
+ default="markdown",
697
+ help="Column name for output text (default: markdown)",
698
+ )
699
+ parser.add_argument(
700
+ "--config",
701
+ help="Config/subset name when pushing to Hub (for benchmarking multiple models in one repo)",
702
+ )
703
+ parser.add_argument(
704
+ "--create-pr",
705
+ action="store_true",
706
+ help="Create a pull request instead of pushing directly (for parallel benchmarking)",
707
+ )
708
+ parser.add_argument(
709
+ "--verbose",
710
+ action="store_true",
711
+ help="Log resolved package versions after processing (useful for pinning deps)",
712
+ )
713
+
714
  args = parser.parse_args()
715
+
716
  main(
717
  input_dataset=args.input_dataset,
718
  output_dataset=args.output_dataset,
 
732
  include_thinking=args.include_thinking,
733
  temperature=args.temperature,
734
  custom_prompt=args.custom_prompt,
735
+ output_column=args.output_column,
736
+ config=args.config,
737
+ create_pr=args.create_pr,
738
+ verbose=args.verbose,
739
+ )
paddleocr-vl-1.5.py CHANGED
@@ -47,6 +47,7 @@ import json
47
  import logging
48
  import os
49
  import sys
 
50
  from datetime import datetime
51
  from typing import Any, Dict, List, Union
52
 
@@ -273,14 +274,17 @@ def main(
273
  output_dataset: str,
274
  image_column: str = "image",
275
  task_mode: str = "ocr",
276
- max_tokens: int = 512,
277
  hf_token: str = None,
278
  split: str = "train",
279
  max_samples: int = None,
280
  private: bool = False,
281
  shuffle: bool = False,
282
  seed: int = 42,
283
- output_column: str = None,
 
 
 
284
  ):
285
  """Process images from HF dataset through PaddleOCR-VL-1.5 model."""
286
 
@@ -301,10 +305,6 @@ def main(
301
  f"Invalid task_mode '{task_mode}'. Choose from: {list(TASK_MODES.keys())}"
302
  )
303
 
304
- # Auto-generate output column name based on task mode
305
- if output_column is None:
306
- output_column = f"paddleocr_1.5_{task_mode}"
307
-
308
  logger.info(f"Using task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
309
  logger.info(f"Output will be written to column: {output_column}")
310
 
@@ -442,9 +442,34 @@ def main(
442
  inference_list = [json.dumps([inference_entry])] * len(dataset)
443
  dataset = dataset.add_column("inference_info", inference_list)
444
 
445
- # Push to hub
446
  logger.info(f"Pushing to {output_dataset}")
447
- dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
448
 
449
  # Create and push dataset card
450
  logger.info("Creating dataset card")
@@ -472,6 +497,17 @@ def main(
472
  )
473
  logger.info(f"Task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
474
 
 
 
 
 
 
 
 
 
 
 
 
475
 
476
  if __name__ == "__main__":
477
  # Show example usage if no arguments
@@ -577,7 +613,7 @@ Backend: Transformers batch inference (not vLLM)
577
  "--max-tokens",
578
  type=int,
579
  default=512,
580
- help="Maximum tokens to generate (default: 512)",
581
  )
582
  parser.add_argument("--hf-token", help="Hugging Face API token")
583
  parser.add_argument(
@@ -602,7 +638,22 @@ Backend: Transformers batch inference (not vLLM)
602
  )
603
  parser.add_argument(
604
  "--output-column",
605
- help="Column name for output (default: paddleocr_1.5_[task_mode])",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
606
  )
607
 
608
  args = parser.parse_args()
@@ -620,4 +671,7 @@ Backend: Transformers batch inference (not vLLM)
620
  shuffle=args.shuffle,
621
  seed=args.seed,
622
  output_column=args.output_column,
 
 
 
623
  )
 
47
  import logging
48
  import os
49
  import sys
50
+ import time
51
  from datetime import datetime
52
  from typing import Any, Dict, List, Union
53
 
 
274
  output_dataset: str,
275
  image_column: str = "image",
276
  task_mode: str = "ocr",
277
+ max_tokens: int = 512, # model card example uses 512 (element-level); increase for full pages
278
  hf_token: str = None,
279
  split: str = "train",
280
  max_samples: int = None,
281
  private: bool = False,
282
  shuffle: bool = False,
283
  seed: int = 42,
284
+ output_column: str = "markdown",
285
+ config: str = None,
286
+ create_pr: bool = False,
287
+ verbose: bool = False,
288
  ):
289
  """Process images from HF dataset through PaddleOCR-VL-1.5 model."""
290
 
 
305
  f"Invalid task_mode '{task_mode}'. Choose from: {list(TASK_MODES.keys())}"
306
  )
307
 
 
 
 
 
308
  logger.info(f"Using task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
309
  logger.info(f"Output will be written to column: {output_column}")
310
 
 
442
  inference_list = [json.dumps([inference_entry])] * len(dataset)
443
  dataset = dataset.add_column("inference_info", inference_list)
444
 
445
+ # Push to hub with retry and XET fallback
446
  logger.info(f"Pushing to {output_dataset}")
447
+ max_retries = 3
448
+ for attempt in range(1, max_retries + 1):
449
+ try:
450
+ if attempt > 1:
451
+ logger.warning("Disabling XET (fallback to HTTP upload)")
452
+ os.environ["HF_HUB_DISABLE_XET"] = "1"
453
+ dataset.push_to_hub(
454
+ output_dataset,
455
+ private=private,
456
+ token=HF_TOKEN,
457
+ max_shard_size="500MB",
458
+ **({"config_name": config} if config else {}),
459
+ create_pr=create_pr,
460
+ commit_message=f"Add {MODEL_ID} OCR results ({len(dataset)} samples)"
461
+ + (f" [{config}]" if config else ""),
462
+ )
463
+ break
464
+ except Exception as e:
465
+ logger.error(f"Upload attempt {attempt}/{max_retries} failed: {e}")
466
+ if attempt < max_retries:
467
+ delay = 30 * (2 ** (attempt - 1))
468
+ logger.info(f"Retrying in {delay}s...")
469
+ time.sleep(delay)
470
+ else:
471
+ logger.error("All upload attempts failed. OCR results are lost.")
472
+ sys.exit(1)
473
 
474
  # Create and push dataset card
475
  logger.info("Creating dataset card")
 
497
  )
498
  logger.info(f"Task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
499
 
500
+ if verbose:
501
+ import importlib.metadata
502
+
503
+ logger.info("--- Resolved package versions ---")
504
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
505
+ try:
506
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
507
+ except importlib.metadata.PackageNotFoundError:
508
+ logger.info(f" {pkg}: not installed")
509
+ logger.info("--- End versions ---")
510
+
511
 
512
  if __name__ == "__main__":
513
  # Show example usage if no arguments
 
613
  "--max-tokens",
614
  type=int,
615
  default=512,
616
+ help="Maximum tokens to generate (default: 512, per model card element-level example; increase for full pages)",
617
  )
618
  parser.add_argument("--hf-token", help="Hugging Face API token")
619
  parser.add_argument(
 
638
  )
639
  parser.add_argument(
640
  "--output-column",
641
+ default="markdown",
642
+ help="Column name for output text (default: markdown)",
643
+ )
644
+ parser.add_argument(
645
+ "--config",
646
+ help="Config/subset name when pushing to Hub (for benchmarking multiple models in one repo)",
647
+ )
648
+ parser.add_argument(
649
+ "--create-pr",
650
+ action="store_true",
651
+ help="Create a pull request instead of pushing directly (for parallel benchmarking)",
652
+ )
653
+ parser.add_argument(
654
+ "--verbose",
655
+ action="store_true",
656
+ help="Log resolved package versions after processing (useful for pinning deps)",
657
  )
658
 
659
  args = parser.parse_args()
 
671
  shuffle=args.shuffle,
672
  seed=args.seed,
673
  output_column=args.output_column,
674
+ config=args.config,
675
+ create_pr=args.create_pr,
676
+ verbose=args.verbose,
677
  )
paddleocr-vl.py CHANGED
@@ -13,7 +13,7 @@
13
  # ]
14
  #
15
  # [[tool.uv.index]]
16
- # url = "https://wheels.vllm.ai/nightly"
17
  #
18
  # [tool.uv]
19
  # prerelease = "allow"
@@ -38,6 +38,11 @@ Features:
38
 
39
  Model: PaddlePaddle/PaddleOCR-VL
40
  vLLM: Requires nightly build for full support
 
 
 
 
 
41
  """
42
 
43
  import argparse
@@ -236,7 +241,7 @@ This dataset contains {task_mode.upper()} results from images in [{source_datase
236
  ### Configuration
237
 
238
  - **Image Column**: `{image_column}`
239
- - **Output Column**: `paddleocr_{task_mode}`
240
  - **Dataset Split**: `{split}`
241
  - **Batch Size**: {batch_size}
242
  - **Smart Resize**: {"Enabled" if apply_smart_resize else "Disabled"}
@@ -267,7 +272,7 @@ PaddleOCR-VL is a state-of-the-art, resource-efficient model tailored for docume
267
  ## Dataset Structure
268
 
269
  The dataset contains all original columns plus:
270
- - `paddleocr_{task_mode}`: The extracted content based on task mode
271
  - `inference_info`: JSON list tracking all OCR models applied to this dataset
272
 
273
  ## Usage
@@ -281,7 +286,7 @@ dataset = load_dataset("{{output_dataset_id}}", split="{split}")
281
 
282
  # Access the extracted content
283
  for example in dataset:
284
- print(example["paddleocr_{task_mode}"])
285
  break
286
 
287
  # View all OCR models applied to this dataset
@@ -334,17 +339,25 @@ def main(
334
  shuffle: bool = False,
335
  seed: int = 42,
336
  output_column: str = None,
 
337
  ):
338
  """Process images from HF dataset through PaddleOCR-VL model."""
339
 
340
  # Check CUDA availability first
341
  check_cuda_availability()
342
 
 
 
 
 
 
 
 
343
  # Track processing start time
344
  start_time = datetime.now()
345
 
346
- # Enable HF_TRANSFER for faster downloads
347
- os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
348
 
349
  # Login to HF if token provided
350
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
@@ -357,9 +370,9 @@ def main(
357
  f"Invalid task_mode '{task_mode}'. Choose from: {list(TASK_MODES.keys())}"
358
  )
359
 
360
- # Auto-generate output column name based on task mode
361
  if output_column is None:
362
- output_column = f"paddleocr_{task_mode}"
363
 
364
  logger.info(f"Using task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
365
  logger.info(f"Output will be written to column: {output_column}")
@@ -390,19 +403,28 @@ def main(
390
  logger.info("This may take a minute on first run (model is only 0.9B)...")
391
 
392
  # Note: PaddleOCR-VL requires specific vLLM configuration
393
- # The model needs custom implementation files to be loaded
394
- os.environ["VLLM_USE_V1"] = "0" # Disable V1 engine for compatibility
395
-
396
- llm = LLM(
397
- model=model_name,
398
- trust_remote_code=True,
399
- max_model_len=max_model_len,
400
- gpu_memory_utilization=gpu_memory_utilization,
401
- limit_mm_per_prompt={"image": 1},
402
- max_num_batched_tokens=16384, # Match server config
403
- enable_prefix_caching=False, # Disable prefix caching like server
404
- enforce_eager=True, # Use eager mode instead of CUDA graphs
405
- )
 
 
 
 
 
 
 
 
 
406
 
407
  # Sampling parameters - deterministic for OCR
408
  sampling_params = SamplingParams(
@@ -524,6 +546,17 @@ def main(
524
  logger.info(f"Processing time: {processing_time_str}")
525
  logger.info(f"Task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
526
 
 
 
 
 
 
 
 
 
 
 
 
527
 
528
  if __name__ == "__main__":
529
  # Show example usage if no arguments
@@ -562,7 +595,7 @@ if __name__ == "__main__":
562
  print(
563
  ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
564
  )
565
- print(" -e HF_HUB_ENABLE_HF_TRANSFER=1 \\")
566
  print(
567
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl.py \\"
568
  )
@@ -673,7 +706,12 @@ Examples:
673
  )
674
  parser.add_argument(
675
  "--output-column",
676
- help="Column name for output (default: paddleocr_[task_mode])",
 
 
 
 
 
677
  )
678
 
679
  args = parser.parse_args()
@@ -696,4 +734,5 @@ Examples:
696
  shuffle=args.shuffle,
697
  seed=args.seed,
698
  output_column=args.output_column,
 
699
  )
 
13
  # ]
14
  #
15
  # [[tool.uv.index]]
16
+ # url = "https://wheels.vllm.ai/nightly/cu129"
17
  #
18
  # [tool.uv]
19
  # prerelease = "allow"
 
38
 
39
  Model: PaddlePaddle/PaddleOCR-VL
40
  vLLM: Requires nightly build for full support
41
+
42
+ IMPORTANT: As of Nov 2025, PaddleOCR-VL batch processing support in vLLM is still
43
+ being finalized. The model works with `vllm serve` but may have issues with the
44
+ LLM batch class. Use alternative models like LightOnOCR or DoTS for now, or run
45
+ PaddleOCR-VL in server mode as shown in the vLLM documentation.
46
  """
47
 
48
  import argparse
 
241
  ### Configuration
242
 
243
  - **Image Column**: `{image_column}`
244
+ - **Output Column**: `markdown`
245
  - **Dataset Split**: `{split}`
246
  - **Batch Size**: {batch_size}
247
  - **Smart Resize**: {"Enabled" if apply_smart_resize else "Disabled"}
 
272
  ## Dataset Structure
273
 
274
  The dataset contains all original columns plus:
275
+ - `markdown`: The extracted content based on task mode
276
  - `inference_info`: JSON list tracking all OCR models applied to this dataset
277
 
278
  ## Usage
 
286
 
287
  # Access the extracted content
288
  for example in dataset:
289
+ print(example["markdown"])
290
  break
291
 
292
  # View all OCR models applied to this dataset
 
339
  shuffle: bool = False,
340
  seed: int = 42,
341
  output_column: str = None,
342
+ verbose: bool = False,
343
  ):
344
  """Process images from HF dataset through PaddleOCR-VL model."""
345
 
346
  # Check CUDA availability first
347
  check_cuda_availability()
348
 
349
+ # Compatibility warning
350
+ logger.warning("⚠️ PaddleOCR-VL batch processing in vLLM is experimental.")
351
+ logger.warning("If initialization fails, consider using:")
352
+ logger.warning(" 1. LightOnOCR (1B) - smallest stable model")
353
+ logger.warning(" 2. DoTS OCR (1.7B) - multilingual support")
354
+ logger.warning(" 3. vllm serve mode for PaddleOCR-VL")
355
+
356
  # Track processing start time
357
  start_time = datetime.now()
358
 
359
+ # Enable high-performance Xet downloads
360
+ os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"
361
 
362
  # Login to HF if token provided
363
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
 
370
  f"Invalid task_mode '{task_mode}'. Choose from: {list(TASK_MODES.keys())}"
371
  )
372
 
373
+ # Default output column is 'markdown' for consistency across scripts
374
  if output_column is None:
375
+ output_column = "markdown"
376
 
377
  logger.info(f"Using task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
378
  logger.info(f"Output will be written to column: {output_column}")
 
403
  logger.info("This may take a minute on first run (model is only 0.9B)...")
404
 
405
  # Note: PaddleOCR-VL requires specific vLLM configuration
406
+ # The model needs to be loaded with specific settings
407
+ try:
408
+ # Try with standard configuration first
409
+ llm = LLM(
410
+ model=model_name,
411
+ trust_remote_code=True,
412
+ max_model_len=max_model_len,
413
+ gpu_memory_utilization=gpu_memory_utilization,
414
+ limit_mm_per_prompt={"image": 1},
415
+ max_num_batched_tokens=16384,
416
+ enable_prefix_caching=False,
417
+ enforce_eager=True,
418
+ )
419
+ except Exception as e:
420
+ logger.error(f"Failed to initialize PaddleOCR-VL with vLLM: {e}")
421
+ logger.error("PaddleOCR-VL may require a newer vLLM version or server mode.")
422
+ logger.error("Try running with vllm serve instead:")
423
+ logger.error(f" vllm serve {model_name} --trust-remote-code \\")
424
+ logger.error(" --max-num-batched-tokens 16384 \\")
425
+ logger.error(" --no-enable-prefix-caching \\")
426
+ logger.error(" --mm-processor-cache-gb 0")
427
+ sys.exit(1)
428
 
429
  # Sampling parameters - deterministic for OCR
430
  sampling_params = SamplingParams(
 
546
  logger.info(f"Processing time: {processing_time_str}")
547
  logger.info(f"Task mode: {task_mode} - {TASK_DESCRIPTIONS[task_mode]}")
548
 
549
+ if verbose:
550
+ import importlib.metadata
551
+
552
+ logger.info("--- Resolved package versions ---")
553
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
554
+ try:
555
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
556
+ except importlib.metadata.PackageNotFoundError:
557
+ logger.info(f" {pkg}: not installed")
558
+ logger.info("--- End versions ---")
559
+
560
 
561
  if __name__ == "__main__":
562
  # Show example usage if no arguments
 
595
  print(
596
  ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
597
  )
598
+ print(" -e HF_XET_HIGH_PERFORMANCE=1 \\")
599
  print(
600
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl.py \\"
601
  )
 
706
  )
707
  parser.add_argument(
708
  "--output-column",
709
+ help="Column name for output (default: markdown)",
710
+ )
711
+ parser.add_argument(
712
+ "--verbose",
713
+ action="store_true",
714
+ help="Log resolved package versions after processing (useful for pinning deps)",
715
  )
716
 
717
  args = parser.parse_args()
 
734
  shuffle=args.shuffle,
735
  seed=args.seed,
736
  output_column=args.output_column,
737
+ verbose=args.verbose,
738
  )
smoldocling-ocr.py CHANGED
@@ -2,7 +2,7 @@
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
  # "datasets",
5
- # "huggingface-hub[hf_transfer]",
6
  # "pillow",
7
  # "vllm",
8
  # "tqdm",
@@ -30,20 +30,17 @@ Features:
30
  """
31
 
32
  import argparse
33
- import base64
34
  import io
35
  import json
36
  import logging
37
  import os
38
- import re
39
  import sys
40
- from typing import Any, Dict, List, Union
41
  from datetime import datetime
 
42
 
43
  import torch
44
  from datasets import load_dataset
45
- from docling_core.types.doc import DoclingDocument
46
- from docling_core.types.doc.document import DocTagsDocument
47
  from huggingface_hub import DatasetCard, login
48
  from PIL import Image
49
  from toolz import partition_all
@@ -226,11 +223,14 @@ def main(
226
  split: str = "train",
227
  max_samples: int = None,
228
  private: bool = False,
229
- output_column: str = None,
230
  output_format: str = "markdown",
231
  shuffle: bool = False,
232
  seed: int = 42,
233
  prompt: str = "Convert page to Docling.",
 
 
 
234
  ):
235
  """Process images from HF dataset through SmolDocling model."""
236
 
@@ -240,9 +240,6 @@ def main(
240
  # Track processing start time
241
  start_time = datetime.now()
242
 
243
- # Enable HF_TRANSFER for faster downloads
244
- os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
245
-
246
  # Login to HF if token provided
247
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
248
  if HF_TOKEN:
@@ -252,13 +249,6 @@ def main(
252
  logger.info(f"Loading dataset: {input_dataset}")
253
  dataset = load_dataset(input_dataset, split=split)
254
 
255
- # Set output column name dynamically if not provided
256
- if output_column is None:
257
- # Extract model name from path (e.g., "ds4sd/SmolDocling-256M-preview" -> "smoldocling")
258
- model_name = model.split("/")[-1].split("-")[0].lower()
259
- output_column = f"{model_name}_text"
260
- logger.info(f"Using dynamic output column name: {output_column}")
261
-
262
  # Validate image column
263
  if image_column not in dataset.column_names:
264
  raise ValueError(
@@ -340,52 +330,69 @@ def main(
340
  dataset = dataset.add_column(output_column, all_output)
341
 
342
  # Handle inference_info tracking
343
- logger.info("Updating inference_info...")
344
-
345
- # Check for existing inference_info
346
- if "inference_info" in dataset.column_names:
347
- # Parse existing info from first row (all rows have same info)
348
- try:
349
- existing_info = json.loads(dataset[0]["inference_info"])
350
- if not isinstance(existing_info, list):
351
- existing_info = [existing_info] # Convert old format to list
352
- except (json.JSONDecodeError, TypeError):
353
- existing_info = []
354
- # Remove old column to update it
355
- dataset = dataset.remove_columns(["inference_info"])
356
- else:
357
- existing_info = []
358
-
359
- # Add new inference info
360
- new_info = {
361
- "column_name": output_column,
362
  "model_id": model,
363
- "processing_date": datetime.now().isoformat(),
364
- "batch_size": batch_size,
365
- "max_tokens": max_tokens,
366
- "gpu_memory_utilization": gpu_memory_utilization,
367
- "max_model_len": max_model_len,
368
  "output_format": output_format,
369
- "prompt": prompt,
370
- "script": "smoldocling-ocr.py",
371
- "script_version": "1.0.0",
372
- "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/smoldocling-ocr.py",
373
  }
374
- existing_info.append(new_info)
375
-
376
- # Add updated inference_info column
377
- info_json = json.dumps(existing_info, ensure_ascii=False)
378
- dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
379
 
380
- # Push to hub
381
- logger.info(f"Pushing to {output_dataset}")
382
- dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383
 
384
  # Calculate processing time
385
- end_time = datetime.now()
386
- processing_duration = end_time - start_time
387
  processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
388
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
389
  # Create and push dataset card
390
  logger.info("Creating dataset card...")
391
  card_content = create_dataset_card(
@@ -405,12 +412,24 @@ def main(
405
 
406
  card = DatasetCard(card_content)
407
  card.push_to_hub(output_dataset, token=HF_TOKEN)
408
- logger.info("Dataset card created and pushed!")
409
 
410
- logger.info(" OCR conversion complete!")
411
  logger.info(
412
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
413
  )
 
 
 
 
 
 
 
 
 
 
 
 
414
 
415
 
416
  if __name__ == "__main__":
@@ -531,8 +550,8 @@ Examples:
531
  )
532
  parser.add_argument(
533
  "--output-column",
534
- default=None,
535
- help="Name of the output column for extracted text (default: auto-generated from model name)",
536
  )
537
  parser.add_argument(
538
  "--output-format",
@@ -556,6 +575,20 @@ Examples:
556
  default="Convert page to Docling.",
557
  help="Custom prompt for the model (default: 'Convert page to Docling.')",
558
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
559
 
560
  args = parser.parse_args()
561
 
@@ -577,4 +610,7 @@ Examples:
577
  shuffle=args.shuffle,
578
  seed=args.seed,
579
  prompt=args.prompt,
 
 
 
580
  )
 
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
  # "datasets",
5
+ # "huggingface-hub",
6
  # "pillow",
7
  # "vllm",
8
  # "tqdm",
 
30
  """
31
 
32
  import argparse
 
33
  import io
34
  import json
35
  import logging
36
  import os
 
37
  import sys
38
+ import time
39
  from datetime import datetime
40
+ from typing import Any, Dict, Union
41
 
42
  import torch
43
  from datasets import load_dataset
 
 
44
  from huggingface_hub import DatasetCard, login
45
  from PIL import Image
46
  from toolz import partition_all
 
223
  split: str = "train",
224
  max_samples: int = None,
225
  private: bool = False,
226
+ output_column: str = "markdown",
227
  output_format: str = "markdown",
228
  shuffle: bool = False,
229
  seed: int = 42,
230
  prompt: str = "Convert page to Docling.",
231
+ config: str = None,
232
+ create_pr: bool = False,
233
+ verbose: bool = False,
234
  ):
235
  """Process images from HF dataset through SmolDocling model."""
236
 
 
240
  # Track processing start time
241
  start_time = datetime.now()
242
 
 
 
 
243
  # Login to HF if token provided
244
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
245
  if HF_TOKEN:
 
249
  logger.info(f"Loading dataset: {input_dataset}")
250
  dataset = load_dataset(input_dataset, split=split)
251
 
 
 
 
 
 
 
 
252
  # Validate image column
253
  if image_column not in dataset.column_names:
254
  raise ValueError(
 
330
  dataset = dataset.add_column(output_column, all_output)
331
 
332
  # Handle inference_info tracking
333
+ inference_entry = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334
  "model_id": model,
335
+ "model_name": "SmolDocling-256M",
336
+ "column_name": output_column,
337
+ "timestamp": datetime.now().isoformat(),
 
 
338
  "output_format": output_format,
339
+ "max_tokens": max_tokens,
 
 
 
340
  }
 
 
 
 
 
341
 
342
+ if "inference_info" in dataset.column_names:
343
+ logger.info("Updating existing inference_info column")
344
+
345
+ def update_inference_info(example):
346
+ try:
347
+ existing_info = (
348
+ json.loads(example["inference_info"])
349
+ if example["inference_info"]
350
+ else []
351
+ )
352
+ except (json.JSONDecodeError, TypeError):
353
+ existing_info = []
354
+ existing_info.append(inference_entry)
355
+ return {"inference_info": json.dumps(existing_info)}
356
+
357
+ dataset = dataset.map(update_inference_info)
358
+ else:
359
+ logger.info("Creating new inference_info column")
360
+ inference_list = [json.dumps([inference_entry])] * len(dataset)
361
+ dataset = dataset.add_column("inference_info", inference_list)
362
 
363
  # Calculate processing time
364
+ processing_duration = datetime.now() - start_time
 
365
  processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
366
 
367
+ # Push to hub with retry and XET fallback
368
+ logger.info(f"Pushing to {output_dataset}")
369
+ max_retries = 3
370
+ for attempt in range(1, max_retries + 1):
371
+ try:
372
+ if attempt > 1:
373
+ logger.warning("Disabling XET (fallback to HTTP upload)")
374
+ os.environ["HF_HUB_DISABLE_XET"] = "1"
375
+ dataset.push_to_hub(
376
+ output_dataset,
377
+ private=private,
378
+ token=HF_TOKEN,
379
+ max_shard_size="500MB",
380
+ **({"config_name": config} if config else {}),
381
+ create_pr=create_pr,
382
+ commit_message=f"Add {model} OCR results ({len(dataset)} samples)"
383
+ + (f" [{config}]" if config else ""),
384
+ )
385
+ break
386
+ except Exception as e:
387
+ logger.error(f"Upload attempt {attempt}/{max_retries} failed: {e}")
388
+ if attempt < max_retries:
389
+ delay = 30 * (2 ** (attempt - 1))
390
+ logger.info(f"Retrying in {delay}s...")
391
+ time.sleep(delay)
392
+ else:
393
+ logger.error("All upload attempts failed. OCR results are lost.")
394
+ sys.exit(1)
395
+
396
  # Create and push dataset card
397
  logger.info("Creating dataset card...")
398
  card_content = create_dataset_card(
 
412
 
413
  card = DatasetCard(card_content)
414
  card.push_to_hub(output_dataset, token=HF_TOKEN)
415
+ logger.info("Dataset card created and pushed!")
416
 
417
+ logger.info("SmolDocling processing complete!")
418
  logger.info(
419
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
420
  )
421
+ logger.info(f"Processing time: {processing_time}")
422
+
423
+ if verbose:
424
+ import importlib.metadata
425
+
426
+ logger.info("--- Resolved package versions ---")
427
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
428
+ try:
429
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
430
+ except importlib.metadata.PackageNotFoundError:
431
+ logger.info(f" {pkg}: not installed")
432
+ logger.info("--- End versions ---")
433
 
434
 
435
  if __name__ == "__main__":
 
550
  )
551
  parser.add_argument(
552
  "--output-column",
553
+ default="markdown",
554
+ help="Column name for output text (default: markdown)",
555
  )
556
  parser.add_argument(
557
  "--output-format",
 
575
  default="Convert page to Docling.",
576
  help="Custom prompt for the model (default: 'Convert page to Docling.')",
577
  )
578
+ parser.add_argument(
579
+ "--config",
580
+ help="Config/subset name when pushing to Hub (for benchmarking multiple models in one repo)",
581
+ )
582
+ parser.add_argument(
583
+ "--create-pr",
584
+ action="store_true",
585
+ help="Create a pull request instead of pushing directly (for parallel benchmarking)",
586
+ )
587
+ parser.add_argument(
588
+ "--verbose",
589
+ action="store_true",
590
+ help="Log resolved package versions after processing (useful for pinning deps)",
591
+ )
592
 
593
  args = parser.parse_args()
594
 
 
610
  shuffle=args.shuffle,
611
  seed=args.seed,
612
  prompt=args.prompt,
613
+ config=args.config,
614
+ create_pr=args.create_pr,
615
+ verbose=args.verbose,
616
  )