Updated script so that it can be controlled by the Node.js web app
lib/python3.13/site-packages/pandas/io/excel/__init__.py (Normal file, +19)
@@ -0,0 +1,19 @@
from pandas.io.excel._base import (
    ExcelFile,
    ExcelWriter,
    read_excel,
)
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
from pandas.io.excel._util import register_writer
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter

__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]


register_writer(_OpenpyxlWriter)

register_writer(_XlsxWriter)


register_writer(_ODSWriter)
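For orientation, a minimal sketch of how the engines registered above are exercised once this vendored pandas is imported (the DataFrame contents and the file name report.xlsx are illustrative; openpyxl is assumed to be installed):

import pandas as pd

# Any DataFrame works here; this one is purely illustrative.
df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})

# to_excel looks up the writer registered above for the chosen engine.
df.to_excel("report.xlsx", engine="openpyxl", index=False)

# read_excel dispatches to the matching reader (OpenpyxlReader for xlsx).
roundtrip = pd.read_excel("report.xlsx", engine="openpyxl")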
Binary files not shown (10 files).
lib/python3.13/site-packages/pandas/io/excel/_base.py (Normal file, +1659)
File diff suppressed because it is too large.
lib/python3.13/site-packages/pandas/io/excel/_calamine.py (Normal file, +121)
@@ -0,0 +1,121 @@
from __future__ import annotations

from datetime import (
    date,
    datetime,
    time,
    timedelta,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Union,
)

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

import pandas as pd
from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from python_calamine import (
        CalamineSheet,
        CalamineWorkbook,
    )

    from pandas._typing import (
        FilePath,
        NaTType,
        ReadBuffer,
        Scalar,
        StorageOptions,
    )

_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta]


class CalamineReader(BaseExcelReader["CalamineWorkbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using calamine engine (xlsx/xls/xlsb/ods).

        Parameters
        ----------
        filepath_or_buffer : str, path to be parsed or
            an open readable stream.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("python_calamine")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[CalamineWorkbook]:
        from python_calamine import CalamineWorkbook

        return CalamineWorkbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any
    ) -> CalamineWorkbook:
        from python_calamine import load_workbook

        return load_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        from python_calamine import SheetTypeEnum

        return [
            sheet.name
            for sheet in self.book.sheets_metadata
            if sheet.typ == SheetTypeEnum.WorkSheet
        ]

    def get_sheet_by_name(self, name: str) -> CalamineSheet:
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet_by_name(name)

    def get_sheet_by_index(self, index: int) -> CalamineSheet:
        self.raise_if_bad_sheet_by_index(index)
        return self.book.get_sheet_by_index(index)

    def get_sheet_data(
        self, sheet: CalamineSheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar | NaTType | time]]:
        def _convert_cell(value: _CellValue) -> Scalar | NaTType | time:
            if isinstance(value, float):
                val = int(value)
                if val == value:
                    return val
                else:
                    return value
            elif isinstance(value, date):
                return pd.Timestamp(value)
            elif isinstance(value, timedelta):
                return pd.Timedelta(value)
            elif isinstance(value, time):
                return value

            return value

        rows: list[list[_CellValue]] = sheet.to_python(
            skip_empty_area=False, nrows=file_rows_needed
        )
        data = [[_convert_cell(cell) for cell in row] for row in rows]

        return data
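A minimal usage sketch for the reader above, assuming the optional python-calamine dependency is installed and that this reader is registered under the engine name "calamine" (the registration itself is not shown in this diff; the file name inventory.xlsx is illustrative):

import pandas as pd

# engine="calamine" makes read_excel instantiate CalamineReader;
# the engine can parse xlsx, xls, xlsb and ods files.
df = pd.read_excel("inventory.xlsx", engine="calamine", sheet_name=0)
print(df.head())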
lib/python3.13/site-packages/pandas/io/excel/_odfreader.py (Normal file, +253)
@@ -0,0 +1,253 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
cast,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
|
||||
from pandas._typing import (
|
||||
FilePath,
|
||||
ReadBuffer,
|
||||
Scalar,
|
||||
StorageOptions,
|
||||
)
|
||||
from pandas.compat._optional import import_optional_dependency
|
||||
from pandas.util._decorators import doc
|
||||
|
||||
import pandas as pd
|
||||
from pandas.core.shared_docs import _shared_docs
|
||||
|
||||
from pandas.io.excel._base import BaseExcelReader
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from odf.opendocument import OpenDocument
|
||||
|
||||
from pandas._libs.tslibs.nattype import NaTType
|
||||
|
||||
|
||||
@doc(storage_options=_shared_docs["storage_options"])
|
||||
class ODFReader(BaseExcelReader["OpenDocument"]):
|
||||
def __init__(
|
||||
self,
|
||||
filepath_or_buffer: FilePath | ReadBuffer[bytes],
|
||||
storage_options: StorageOptions | None = None,
|
||||
engine_kwargs: dict | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Read tables out of OpenDocument formatted files.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
filepath_or_buffer : str, path to be parsed or
|
||||
an open readable stream.
|
||||
{storage_options}
|
||||
engine_kwargs : dict, optional
|
||||
Arbitrary keyword arguments passed to excel engine.
|
||||
"""
|
||||
import_optional_dependency("odf")
|
||||
super().__init__(
|
||||
filepath_or_buffer,
|
||||
storage_options=storage_options,
|
||||
engine_kwargs=engine_kwargs,
|
||||
)
|
||||
|
||||
@property
|
||||
def _workbook_class(self) -> type[OpenDocument]:
|
||||
from odf.opendocument import OpenDocument
|
||||
|
||||
return OpenDocument
|
||||
|
||||
def load_workbook(
|
||||
self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
|
||||
) -> OpenDocument:
|
||||
from odf.opendocument import load
|
||||
|
||||
return load(filepath_or_buffer, **engine_kwargs)
|
||||
|
||||
@property
|
||||
def empty_value(self) -> str:
|
||||
"""Property for compat with other readers."""
|
||||
return ""
|
||||
|
||||
@property
|
||||
def sheet_names(self) -> list[str]:
|
||||
"""Return a list of sheet names present in the document"""
|
||||
from odf.table import Table
|
||||
|
||||
tables = self.book.getElementsByType(Table)
|
||||
return [t.getAttribute("name") for t in tables]
|
||||
|
||||
def get_sheet_by_index(self, index: int):
|
||||
from odf.table import Table
|
||||
|
||||
self.raise_if_bad_sheet_by_index(index)
|
||||
tables = self.book.getElementsByType(Table)
|
||||
return tables[index]
|
||||
|
||||
def get_sheet_by_name(self, name: str):
|
||||
from odf.table import Table
|
||||
|
||||
self.raise_if_bad_sheet_by_name(name)
|
||||
tables = self.book.getElementsByType(Table)
|
||||
|
||||
for table in tables:
|
||||
if table.getAttribute("name") == name:
|
||||
return table
|
||||
|
||||
self.close()
|
||||
raise ValueError(f"sheet {name} not found")
|
||||
|
||||
def get_sheet_data(
|
||||
self, sheet, file_rows_needed: int | None = None
|
||||
) -> list[list[Scalar | NaTType]]:
|
||||
"""
|
||||
Parse an ODF Table into a list of lists
|
||||
"""
|
||||
from odf.table import (
|
||||
CoveredTableCell,
|
||||
TableCell,
|
||||
TableRow,
|
||||
)
|
||||
|
||||
covered_cell_name = CoveredTableCell().qname
|
||||
table_cell_name = TableCell().qname
|
||||
cell_names = {covered_cell_name, table_cell_name}
|
||||
|
||||
sheet_rows = sheet.getElementsByType(TableRow)
|
||||
empty_rows = 0
|
||||
max_row_len = 0
|
||||
|
||||
table: list[list[Scalar | NaTType]] = []
|
||||
|
||||
for sheet_row in sheet_rows:
|
||||
sheet_cells = [
|
||||
x
|
||||
for x in sheet_row.childNodes
|
||||
if hasattr(x, "qname") and x.qname in cell_names
|
||||
]
|
||||
empty_cells = 0
|
||||
table_row: list[Scalar | NaTType] = []
|
||||
|
||||
for sheet_cell in sheet_cells:
|
||||
if sheet_cell.qname == table_cell_name:
|
||||
value = self._get_cell_value(sheet_cell)
|
||||
else:
|
||||
value = self.empty_value
|
||||
|
||||
column_repeat = self._get_column_repeat(sheet_cell)
|
||||
|
||||
# Queue up empty values, writing only if content succeeds them
|
||||
if value == self.empty_value:
|
||||
empty_cells += column_repeat
|
||||
else:
|
||||
table_row.extend([self.empty_value] * empty_cells)
|
||||
empty_cells = 0
|
||||
table_row.extend([value] * column_repeat)
|
||||
|
||||
if max_row_len < len(table_row):
|
||||
max_row_len = len(table_row)
|
||||
|
||||
row_repeat = self._get_row_repeat(sheet_row)
|
||||
if len(table_row) == 0:
|
||||
empty_rows += row_repeat
|
||||
else:
|
||||
# add blank rows to our table
|
||||
table.extend([[self.empty_value]] * empty_rows)
|
||||
empty_rows = 0
|
||||
table.extend(table_row for _ in range(row_repeat))
|
||||
if file_rows_needed is not None and len(table) >= file_rows_needed:
|
||||
break
|
||||
|
||||
# Make our table square
|
||||
for row in table:
|
||||
if len(row) < max_row_len:
|
||||
row.extend([self.empty_value] * (max_row_len - len(row)))
|
||||
|
||||
return table
|
||||
|
||||
def _get_row_repeat(self, row) -> int:
|
||||
"""
|
||||
Return number of times this row was repeated
|
||||
Repeating an empty row appeared to be a common way
|
||||
of representing sparse rows in the table.
|
||||
"""
|
||||
from odf.namespaces import TABLENS
|
||||
|
||||
return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))
|
||||
|
||||
def _get_column_repeat(self, cell) -> int:
|
||||
from odf.namespaces import TABLENS
|
||||
|
||||
return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
|
||||
|
||||
def _get_cell_value(self, cell) -> Scalar | NaTType:
|
||||
from odf.namespaces import OFFICENS
|
||||
|
||||
if str(cell) == "#N/A":
|
||||
return np.nan
|
||||
|
||||
cell_type = cell.attributes.get((OFFICENS, "value-type"))
|
||||
if cell_type == "boolean":
|
||||
if str(cell) == "TRUE":
|
||||
return True
|
||||
return False
|
||||
if cell_type is None:
|
||||
return self.empty_value
|
||||
elif cell_type == "float":
|
||||
# GH5394
|
||||
cell_value = float(cell.attributes.get((OFFICENS, "value")))
|
||||
val = int(cell_value)
|
||||
if val == cell_value:
|
||||
return val
|
||||
return cell_value
|
||||
elif cell_type == "percentage":
|
||||
cell_value = cell.attributes.get((OFFICENS, "value"))
|
||||
return float(cell_value)
|
||||
elif cell_type == "string":
|
||||
return self._get_cell_string_value(cell)
|
||||
elif cell_type == "currency":
|
||||
cell_value = cell.attributes.get((OFFICENS, "value"))
|
||||
return float(cell_value)
|
||||
elif cell_type == "date":
|
||||
cell_value = cell.attributes.get((OFFICENS, "date-value"))
|
||||
return pd.Timestamp(cell_value)
|
||||
elif cell_type == "time":
|
||||
stamp = pd.Timestamp(str(cell))
|
||||
# cast needed here because Scalar doesn't include datetime.time
|
||||
return cast(Scalar, stamp.time())
|
||||
else:
|
||||
self.close()
|
||||
raise ValueError(f"Unrecognized type {cell_type}")
|
||||
|
||||
def _get_cell_string_value(self, cell) -> str:
|
||||
"""
|
||||
Find and decode OpenDocument text:s tags that represent
|
||||
a run length encoded sequence of space characters.
|
||||
"""
|
||||
from odf.element import Element
|
||||
from odf.namespaces import TEXTNS
|
||||
from odf.office import Annotation
|
||||
from odf.text import S
|
||||
|
||||
office_annotation = Annotation().qname
|
||||
text_s = S().qname
|
||||
|
||||
value = []
|
||||
|
||||
for fragment in cell.childNodes:
|
||||
if isinstance(fragment, Element):
|
||||
if fragment.qname == text_s:
|
||||
spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
|
||||
value.append(" " * spaces)
|
||||
elif fragment.qname == office_annotation:
|
||||
continue
|
||||
else:
|
||||
# recursive impl needed in case of nested fragments
|
||||
# with multiple spaces
|
||||
# https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
|
||||
value.append(self._get_cell_string_value(fragment))
|
||||
else:
|
||||
value.append(str(fragment).strip("\n"))
|
||||
return "".join(value)
|
lib/python3.13/site-packages/pandas/io/excel/_odswriter.py (Normal file, +357)
@@ -0,0 +1,357 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
import datetime
|
||||
import json
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
DefaultDict,
|
||||
cast,
|
||||
overload,
|
||||
)
|
||||
|
||||
from pandas.io.excel._base import ExcelWriter
|
||||
from pandas.io.excel._util import (
|
||||
combine_kwargs,
|
||||
validate_freeze_panes,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pandas._typing import (
|
||||
ExcelWriterIfSheetExists,
|
||||
FilePath,
|
||||
StorageOptions,
|
||||
WriteExcelBuffer,
|
||||
)
|
||||
|
||||
from pandas.io.formats.excel import ExcelCell
|
||||
|
||||
|
||||
class ODSWriter(ExcelWriter):
|
||||
_engine = "odf"
|
||||
_supported_extensions = (".ods",)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
path: FilePath | WriteExcelBuffer | ExcelWriter,
|
||||
engine: str | None = None,
|
||||
date_format: str | None = None,
|
||||
datetime_format=None,
|
||||
mode: str = "w",
|
||||
storage_options: StorageOptions | None = None,
|
||||
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
|
||||
engine_kwargs: dict[str, Any] | None = None,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
from odf.opendocument import OpenDocumentSpreadsheet
|
||||
|
||||
if mode == "a":
|
||||
raise ValueError("Append mode is not supported with odf!")
|
||||
|
||||
engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
|
||||
self._book = OpenDocumentSpreadsheet(**engine_kwargs)
|
||||
|
||||
super().__init__(
|
||||
path,
|
||||
mode=mode,
|
||||
storage_options=storage_options,
|
||||
if_sheet_exists=if_sheet_exists,
|
||||
engine_kwargs=engine_kwargs,
|
||||
)
|
||||
|
||||
self._style_dict: dict[str, str] = {}
|
||||
|
||||
@property
|
||||
def book(self):
|
||||
"""
|
||||
Book instance of class odf.opendocument.OpenDocumentSpreadsheet.
|
||||
|
||||
This attribute can be used to access engine-specific features.
|
||||
"""
|
||||
return self._book
|
||||
|
||||
@property
|
||||
def sheets(self) -> dict[str, Any]:
|
||||
"""Mapping of sheet names to sheet objects."""
|
||||
from odf.table import Table
|
||||
|
||||
result = {
|
||||
sheet.getAttribute("name"): sheet
|
||||
for sheet in self.book.getElementsByType(Table)
|
||||
}
|
||||
return result
|
||||
|
||||
def _save(self) -> None:
|
||||
"""
|
||||
Save workbook to disk.
|
||||
"""
|
||||
for sheet in self.sheets.values():
|
||||
self.book.spreadsheet.addElement(sheet)
|
||||
self.book.save(self._handles.handle)
|
||||
|
||||
def _write_cells(
|
||||
self,
|
||||
cells: list[ExcelCell],
|
||||
sheet_name: str | None = None,
|
||||
startrow: int = 0,
|
||||
startcol: int = 0,
|
||||
freeze_panes: tuple[int, int] | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Write the frame cells using odf
|
||||
"""
|
||||
from odf.table import (
|
||||
Table,
|
||||
TableCell,
|
||||
TableRow,
|
||||
)
|
||||
from odf.text import P
|
||||
|
||||
sheet_name = self._get_sheet_name(sheet_name)
|
||||
assert sheet_name is not None
|
||||
|
||||
if sheet_name in self.sheets:
|
||||
wks = self.sheets[sheet_name]
|
||||
else:
|
||||
wks = Table(name=sheet_name)
|
||||
self.book.spreadsheet.addElement(wks)
|
||||
|
||||
if validate_freeze_panes(freeze_panes):
|
||||
freeze_panes = cast(tuple[int, int], freeze_panes)
|
||||
self._create_freeze_panes(sheet_name, freeze_panes)
|
||||
|
||||
for _ in range(startrow):
|
||||
wks.addElement(TableRow())
|
||||
|
||||
rows: DefaultDict = defaultdict(TableRow)
|
||||
col_count: DefaultDict = defaultdict(int)
|
||||
|
||||
for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
|
||||
# only add empty cells if the row is still empty
|
||||
if not col_count[cell.row]:
|
||||
for _ in range(startcol):
|
||||
rows[cell.row].addElement(TableCell())
|
||||
|
||||
# fill with empty cells if needed
|
||||
for _ in range(cell.col - col_count[cell.row]):
|
||||
rows[cell.row].addElement(TableCell())
|
||||
col_count[cell.row] += 1
|
||||
|
||||
pvalue, tc = self._make_table_cell(cell)
|
||||
rows[cell.row].addElement(tc)
|
||||
col_count[cell.row] += 1
|
||||
p = P(text=pvalue)
|
||||
tc.addElement(p)
|
||||
|
||||
# add all rows to the sheet
|
||||
if len(rows) > 0:
|
||||
for row_nr in range(max(rows.keys()) + 1):
|
||||
wks.addElement(rows[row_nr])
|
||||
|
||||
def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
|
||||
"""Convert cell attributes to OpenDocument attributes
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cell : ExcelCell
|
||||
Spreadsheet cell data
|
||||
|
||||
Returns
|
||||
-------
|
||||
attributes : Dict[str, Union[int, str]]
|
||||
Dictionary with attributes and attribute values
|
||||
"""
|
||||
attributes: dict[str, int | str] = {}
|
||||
style_name = self._process_style(cell.style)
|
||||
if style_name is not None:
|
||||
attributes["stylename"] = style_name
|
||||
if cell.mergestart is not None and cell.mergeend is not None:
|
||||
attributes["numberrowsspanned"] = max(1, cell.mergestart)
|
||||
attributes["numbercolumnsspanned"] = cell.mergeend
|
||||
return attributes
|
||||
|
||||
def _make_table_cell(self, cell) -> tuple[object, Any]:
|
||||
"""Convert cell data to an OpenDocument spreadsheet cell
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cell : ExcelCell
|
||||
Spreadsheet cell data
|
||||
|
||||
Returns
|
||||
-------
|
||||
pvalue, cell : Tuple[str, TableCell]
|
||||
Display value, Cell value
|
||||
"""
|
||||
from odf.table import TableCell
|
||||
|
||||
attributes = self._make_table_cell_attributes(cell)
|
||||
val, fmt = self._value_with_fmt(cell.val)
|
||||
pvalue = value = val
|
||||
if isinstance(val, bool):
|
||||
value = str(val).lower()
|
||||
pvalue = str(val).upper()
|
||||
return (
|
||||
pvalue,
|
||||
TableCell(
|
||||
valuetype="boolean",
|
||||
booleanvalue=value,
|
||||
attributes=attributes,
|
||||
),
|
||||
)
|
||||
elif isinstance(val, datetime.datetime):
|
||||
# Fast formatting
|
||||
value = val.isoformat()
|
||||
# Slow but locale-dependent
|
||||
pvalue = val.strftime("%c")
|
||||
return (
|
||||
pvalue,
|
||||
TableCell(valuetype="date", datevalue=value, attributes=attributes),
|
||||
)
|
||||
elif isinstance(val, datetime.date):
|
||||
# Fast formatting
|
||||
value = f"{val.year}-{val.month:02d}-{val.day:02d}"
|
||||
# Slow but locale-dependent
|
||||
pvalue = val.strftime("%x")
|
||||
return (
|
||||
pvalue,
|
||||
TableCell(valuetype="date", datevalue=value, attributes=attributes),
|
||||
)
|
||||
elif isinstance(val, str):
|
||||
return (
|
||||
pvalue,
|
||||
TableCell(
|
||||
valuetype="string",
|
||||
stringvalue=value,
|
||||
attributes=attributes,
|
||||
),
|
||||
)
|
||||
else:
|
||||
return (
|
||||
pvalue,
|
||||
TableCell(
|
||||
valuetype="float",
|
||||
value=value,
|
||||
attributes=attributes,
|
||||
),
|
||||
)
|
||||
|
||||
@overload
|
||||
def _process_style(self, style: dict[str, Any]) -> str:
|
||||
...
|
||||
|
||||
@overload
|
||||
def _process_style(self, style: None) -> None:
|
||||
...
|
||||
|
||||
def _process_style(self, style: dict[str, Any] | None) -> str | None:
|
||||
"""Convert a style dictionary to a OpenDocument style sheet
|
||||
|
||||
Parameters
|
||||
----------
|
||||
style : Dict
|
||||
Style dictionary
|
||||
|
||||
Returns
|
||||
-------
|
||||
style_key : str
|
||||
Unique style key for later reference in sheet
|
||||
"""
|
||||
from odf.style import (
|
||||
ParagraphProperties,
|
||||
Style,
|
||||
TableCellProperties,
|
||||
TextProperties,
|
||||
)
|
||||
|
||||
if style is None:
|
||||
return None
|
||||
style_key = json.dumps(style)
|
||||
if style_key in self._style_dict:
|
||||
return self._style_dict[style_key]
|
||||
name = f"pd{len(self._style_dict)+1}"
|
||||
self._style_dict[style_key] = name
|
||||
odf_style = Style(name=name, family="table-cell")
|
||||
if "font" in style:
|
||||
font = style["font"]
|
||||
if font.get("bold", False):
|
||||
odf_style.addElement(TextProperties(fontweight="bold"))
|
||||
if "borders" in style:
|
||||
borders = style["borders"]
|
||||
for side, thickness in borders.items():
|
||||
thickness_translation = {"thin": "0.75pt solid #000000"}
|
||||
odf_style.addElement(
|
||||
TableCellProperties(
|
||||
attributes={f"border{side}": thickness_translation[thickness]}
|
||||
)
|
||||
)
|
||||
if "alignment" in style:
|
||||
alignment = style["alignment"]
|
||||
horizontal = alignment.get("horizontal")
|
||||
if horizontal:
|
||||
odf_style.addElement(ParagraphProperties(textalign=horizontal))
|
||||
vertical = alignment.get("vertical")
|
||||
if vertical:
|
||||
odf_style.addElement(TableCellProperties(verticalalign=vertical))
|
||||
self.book.styles.addElement(odf_style)
|
||||
return name
|
||||
|
||||
def _create_freeze_panes(
|
||||
self, sheet_name: str, freeze_panes: tuple[int, int]
|
||||
) -> None:
|
||||
"""
|
||||
Create freeze panes in the sheet.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
sheet_name : str
|
||||
Name of the spreadsheet
|
||||
freeze_panes : tuple of (int, int)
|
||||
Freeze pane location x and y
|
||||
"""
|
||||
from odf.config import (
|
||||
ConfigItem,
|
||||
ConfigItemMapEntry,
|
||||
ConfigItemMapIndexed,
|
||||
ConfigItemMapNamed,
|
||||
ConfigItemSet,
|
||||
)
|
||||
|
||||
config_item_set = ConfigItemSet(name="ooo:view-settings")
|
||||
self.book.settings.addElement(config_item_set)
|
||||
|
||||
config_item_map_indexed = ConfigItemMapIndexed(name="Views")
|
||||
config_item_set.addElement(config_item_map_indexed)
|
||||
|
||||
config_item_map_entry = ConfigItemMapEntry()
|
||||
config_item_map_indexed.addElement(config_item_map_entry)
|
||||
|
||||
config_item_map_named = ConfigItemMapNamed(name="Tables")
|
||||
config_item_map_entry.addElement(config_item_map_named)
|
||||
|
||||
config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
|
||||
config_item_map_named.addElement(config_item_map_entry)
|
||||
|
||||
config_item_map_entry.addElement(
|
||||
ConfigItem(name="HorizontalSplitMode", type="short", text="2")
|
||||
)
|
||||
config_item_map_entry.addElement(
|
||||
ConfigItem(name="VerticalSplitMode", type="short", text="2")
|
||||
)
|
||||
config_item_map_entry.addElement(
|
||||
ConfigItem(
|
||||
name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
|
||||
)
|
||||
)
|
||||
config_item_map_entry.addElement(
|
||||
ConfigItem(
|
||||
name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
|
||||
)
|
||||
)
|
||||
config_item_map_entry.addElement(
|
||||
ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
|
||||
)
|
||||
config_item_map_entry.addElement(
|
||||
ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
|
||||
)
|
lib/python3.13/site-packages/pandas/io/excel/_openpyxl.py (Normal file, +639)
@@ -0,0 +1,639 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import mmap
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
cast,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
|
||||
from pandas.compat._optional import import_optional_dependency
|
||||
from pandas.util._decorators import doc
|
||||
|
||||
from pandas.core.shared_docs import _shared_docs
|
||||
|
||||
from pandas.io.excel._base import (
|
||||
BaseExcelReader,
|
||||
ExcelWriter,
|
||||
)
|
||||
from pandas.io.excel._util import (
|
||||
combine_kwargs,
|
||||
validate_freeze_panes,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from openpyxl import Workbook
|
||||
from openpyxl.descriptors.serialisable import Serialisable
|
||||
|
||||
from pandas._typing import (
|
||||
ExcelWriterIfSheetExists,
|
||||
FilePath,
|
||||
ReadBuffer,
|
||||
Scalar,
|
||||
StorageOptions,
|
||||
WriteExcelBuffer,
|
||||
)
|
||||
|
||||
|
||||
class OpenpyxlWriter(ExcelWriter):
|
||||
_engine = "openpyxl"
|
||||
_supported_extensions = (".xlsx", ".xlsm")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
path: FilePath | WriteExcelBuffer | ExcelWriter,
|
||||
engine: str | None = None,
|
||||
date_format: str | None = None,
|
||||
datetime_format: str | None = None,
|
||||
mode: str = "w",
|
||||
storage_options: StorageOptions | None = None,
|
||||
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
|
||||
engine_kwargs: dict[str, Any] | None = None,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
# Use the openpyxl module as the Excel writer.
|
||||
from openpyxl.workbook import Workbook
|
||||
|
||||
engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
|
||||
|
||||
super().__init__(
|
||||
path,
|
||||
mode=mode,
|
||||
storage_options=storage_options,
|
||||
if_sheet_exists=if_sheet_exists,
|
||||
engine_kwargs=engine_kwargs,
|
||||
)
|
||||
|
||||
# ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
|
||||
# the file and later write to it
|
||||
if "r+" in self._mode: # Load from existing workbook
|
||||
from openpyxl import load_workbook
|
||||
|
||||
try:
|
||||
self._book = load_workbook(self._handles.handle, **engine_kwargs)
|
||||
except TypeError:
|
||||
self._handles.handle.close()
|
||||
raise
|
||||
self._handles.handle.seek(0)
|
||||
else:
|
||||
# Create workbook object with default optimized_write=True.
|
||||
try:
|
||||
self._book = Workbook(**engine_kwargs)
|
||||
except TypeError:
|
||||
self._handles.handle.close()
|
||||
raise
|
||||
|
||||
if self.book.worksheets:
|
||||
self.book.remove(self.book.worksheets[0])
|
||||
|
||||
@property
|
||||
def book(self) -> Workbook:
|
||||
"""
|
||||
Book instance of class openpyxl.workbook.Workbook.
|
||||
|
||||
This attribute can be used to access engine-specific features.
|
||||
"""
|
||||
return self._book
|
||||
|
||||
@property
|
||||
def sheets(self) -> dict[str, Any]:
|
||||
"""Mapping of sheet names to sheet objects."""
|
||||
result = {name: self.book[name] for name in self.book.sheetnames}
|
||||
return result
|
||||
|
||||
def _save(self) -> None:
|
||||
"""
|
||||
Save workbook to disk.
|
||||
"""
|
||||
self.book.save(self._handles.handle)
|
||||
if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
|
||||
# truncate file to the written content
|
||||
self._handles.handle.truncate()
|
||||
|
||||
@classmethod
|
||||
def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
|
||||
"""
|
||||
Convert a style_dict to a set of kwargs suitable for initializing
|
||||
or updating-on-copy an openpyxl v2 style object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
style_dict : dict
|
||||
A dict with zero or more of the following keys (or their synonyms).
|
||||
'font'
|
||||
'fill'
|
||||
'border' ('borders')
|
||||
'alignment'
|
||||
'number_format'
|
||||
'protection'
|
||||
|
||||
Returns
|
||||
-------
|
||||
style_kwargs : dict
|
||||
A dict with the same, normalized keys as ``style_dict`` but each
|
||||
value has been replaced with a native openpyxl style object of the
|
||||
appropriate class.
|
||||
"""
|
||||
_style_key_map = {"borders": "border"}
|
||||
|
||||
style_kwargs: dict[str, Serialisable] = {}
|
||||
for k, v in style_dict.items():
|
||||
k = _style_key_map.get(k, k)
|
||||
_conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
|
||||
new_v = _conv_to_x(v)
|
||||
if new_v:
|
||||
style_kwargs[k] = new_v
|
||||
|
||||
return style_kwargs
|
||||
|
||||
@classmethod
|
||||
def _convert_to_color(cls, color_spec):
|
||||
"""
|
||||
Convert ``color_spec`` to an openpyxl v2 Color object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
color_spec : str, dict
|
||||
A 32-bit ARGB hex string, or a dict with zero or more of the
|
||||
following keys.
|
||||
'rgb'
|
||||
'indexed'
|
||||
'auto'
|
||||
'theme'
|
||||
'tint'
|
||||
'index'
|
||||
'type'
|
||||
|
||||
Returns
|
||||
-------
|
||||
color : openpyxl.styles.Color
|
||||
"""
|
||||
from openpyxl.styles import Color
|
||||
|
||||
if isinstance(color_spec, str):
|
||||
return Color(color_spec)
|
||||
else:
|
||||
return Color(**color_spec)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_font(cls, font_dict):
|
||||
"""
|
||||
Convert ``font_dict`` to an openpyxl v2 Font object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
font_dict : dict
|
||||
A dict with zero or more of the following keys (or their synonyms).
|
||||
'name'
|
||||
'size' ('sz')
|
||||
'bold' ('b')
|
||||
'italic' ('i')
|
||||
'underline' ('u')
|
||||
'strikethrough' ('strike')
|
||||
'color'
|
||||
'vertAlign' ('vertalign')
|
||||
'charset'
|
||||
'scheme'
|
||||
'family'
|
||||
'outline'
|
||||
'shadow'
|
||||
'condense'
|
||||
|
||||
Returns
|
||||
-------
|
||||
font : openpyxl.styles.Font
|
||||
"""
|
||||
from openpyxl.styles import Font
|
||||
|
||||
_font_key_map = {
|
||||
"sz": "size",
|
||||
"b": "bold",
|
||||
"i": "italic",
|
||||
"u": "underline",
|
||||
"strike": "strikethrough",
|
||||
"vertalign": "vertAlign",
|
||||
}
|
||||
|
||||
font_kwargs = {}
|
||||
for k, v in font_dict.items():
|
||||
k = _font_key_map.get(k, k)
|
||||
if k == "color":
|
||||
v = cls._convert_to_color(v)
|
||||
font_kwargs[k] = v
|
||||
|
||||
return Font(**font_kwargs)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_stop(cls, stop_seq):
|
||||
"""
|
||||
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
|
||||
suitable for initializing the ``GradientFill`` ``stop`` parameter.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
stop_seq : iterable
|
||||
An iterable that yields objects suitable for consumption by
|
||||
``_convert_to_color``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
stop : list of openpyxl.styles.Color
|
||||
"""
|
||||
return map(cls._convert_to_color, stop_seq)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_fill(cls, fill_dict: dict[str, Any]):
|
||||
"""
|
||||
Convert ``fill_dict`` to an openpyxl v2 Fill object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fill_dict : dict
|
||||
A dict with one or more of the following keys (or their synonyms),
|
||||
'fill_type' ('patternType', 'patterntype')
|
||||
'start_color' ('fgColor', 'fgcolor')
|
||||
'end_color' ('bgColor', 'bgcolor')
|
||||
or one or more of the following keys (or their synonyms).
|
||||
'type' ('fill_type')
|
||||
'degree'
|
||||
'left'
|
||||
'right'
|
||||
'top'
|
||||
'bottom'
|
||||
'stop'
|
||||
|
||||
Returns
|
||||
-------
|
||||
fill : openpyxl.styles.Fill
|
||||
"""
|
||||
from openpyxl.styles import (
|
||||
GradientFill,
|
||||
PatternFill,
|
||||
)
|
||||
|
||||
_pattern_fill_key_map = {
|
||||
"patternType": "fill_type",
|
||||
"patterntype": "fill_type",
|
||||
"fgColor": "start_color",
|
||||
"fgcolor": "start_color",
|
||||
"bgColor": "end_color",
|
||||
"bgcolor": "end_color",
|
||||
}
|
||||
|
||||
_gradient_fill_key_map = {"fill_type": "type"}
|
||||
|
||||
pfill_kwargs = {}
|
||||
gfill_kwargs = {}
|
||||
for k, v in fill_dict.items():
|
||||
pk = _pattern_fill_key_map.get(k)
|
||||
gk = _gradient_fill_key_map.get(k)
|
||||
if pk in ["start_color", "end_color"]:
|
||||
v = cls._convert_to_color(v)
|
||||
if gk == "stop":
|
||||
v = cls._convert_to_stop(v)
|
||||
if pk:
|
||||
pfill_kwargs[pk] = v
|
||||
elif gk:
|
||||
gfill_kwargs[gk] = v
|
||||
else:
|
||||
pfill_kwargs[k] = v
|
||||
gfill_kwargs[k] = v
|
||||
|
||||
try:
|
||||
return PatternFill(**pfill_kwargs)
|
||||
except TypeError:
|
||||
return GradientFill(**gfill_kwargs)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_side(cls, side_spec):
|
||||
"""
|
||||
Convert ``side_spec`` to an openpyxl v2 Side object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
side_spec : str, dict
|
||||
A string specifying the border style, or a dict with zero or more
|
||||
of the following keys (or their synonyms).
|
||||
'style' ('border_style')
|
||||
'color'
|
||||
|
||||
Returns
|
||||
-------
|
||||
side : openpyxl.styles.Side
|
||||
"""
|
||||
from openpyxl.styles import Side
|
||||
|
||||
_side_key_map = {"border_style": "style"}
|
||||
|
||||
if isinstance(side_spec, str):
|
||||
return Side(style=side_spec)
|
||||
|
||||
side_kwargs = {}
|
||||
for k, v in side_spec.items():
|
||||
k = _side_key_map.get(k, k)
|
||||
if k == "color":
|
||||
v = cls._convert_to_color(v)
|
||||
side_kwargs[k] = v
|
||||
|
||||
return Side(**side_kwargs)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_border(cls, border_dict):
|
||||
"""
|
||||
Convert ``border_dict`` to an openpyxl v2 Border object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
border_dict : dict
|
||||
A dict with zero or more of the following keys (or their synonyms).
|
||||
'left'
|
||||
'right'
|
||||
'top'
|
||||
'bottom'
|
||||
'diagonal'
|
||||
'diagonal_direction'
|
||||
'vertical'
|
||||
'horizontal'
|
||||
'diagonalUp' ('diagonalup')
|
||||
'diagonalDown' ('diagonaldown')
|
||||
'outline'
|
||||
|
||||
Returns
|
||||
-------
|
||||
border : openpyxl.styles.Border
|
||||
"""
|
||||
from openpyxl.styles import Border
|
||||
|
||||
_border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
|
||||
|
||||
border_kwargs = {}
|
||||
for k, v in border_dict.items():
|
||||
k = _border_key_map.get(k, k)
|
||||
if k == "color":
|
||||
v = cls._convert_to_color(v)
|
||||
if k in ["left", "right", "top", "bottom", "diagonal"]:
|
||||
v = cls._convert_to_side(v)
|
||||
border_kwargs[k] = v
|
||||
|
||||
return Border(**border_kwargs)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_alignment(cls, alignment_dict):
|
||||
"""
|
||||
Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
alignment_dict : dict
|
||||
A dict with zero or more of the following keys (or their synonyms).
|
||||
'horizontal'
|
||||
'vertical'
|
||||
'text_rotation'
|
||||
'wrap_text'
|
||||
'shrink_to_fit'
|
||||
'indent'
|
||||
Returns
|
||||
-------
|
||||
alignment : openpyxl.styles.Alignment
|
||||
"""
|
||||
from openpyxl.styles import Alignment
|
||||
|
||||
return Alignment(**alignment_dict)
|
||||
|
||||
@classmethod
|
||||
def _convert_to_number_format(cls, number_format_dict):
|
||||
"""
|
||||
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
|
||||
initializer.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
number_format_dict : dict
|
||||
A dict with zero or more of the following keys.
|
||||
'format_code' : str
|
||||
|
||||
Returns
|
||||
-------
|
||||
number_format : str
|
||||
"""
|
||||
return number_format_dict["format_code"]
|
||||
|
||||
@classmethod
|
||||
def _convert_to_protection(cls, protection_dict):
|
||||
"""
|
||||
Convert ``protection_dict`` to an openpyxl v2 Protection object.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
protection_dict : dict
|
||||
A dict with zero or more of the following keys.
|
||||
'locked'
|
||||
'hidden'
|
||||
|
||||
Returns
|
||||
-------
|
||||
"""
|
||||
from openpyxl.styles import Protection
|
||||
|
||||
return Protection(**protection_dict)
|
||||
|
||||
def _write_cells(
|
||||
self,
|
||||
cells,
|
||||
sheet_name: str | None = None,
|
||||
startrow: int = 0,
|
||||
startcol: int = 0,
|
||||
freeze_panes: tuple[int, int] | None = None,
|
||||
) -> None:
|
||||
# Write the frame cells using openpyxl.
|
||||
sheet_name = self._get_sheet_name(sheet_name)
|
||||
|
||||
_style_cache: dict[str, dict[str, Serialisable]] = {}
|
||||
|
||||
if sheet_name in self.sheets and self._if_sheet_exists != "new":
|
||||
if "r+" in self._mode:
|
||||
if self._if_sheet_exists == "replace":
|
||||
old_wks = self.sheets[sheet_name]
|
||||
target_index = self.book.index(old_wks)
|
||||
del self.book[sheet_name]
|
||||
wks = self.book.create_sheet(sheet_name, target_index)
|
||||
elif self._if_sheet_exists == "error":
|
||||
raise ValueError(
|
||||
f"Sheet '{sheet_name}' already exists and "
|
||||
f"if_sheet_exists is set to 'error'."
|
||||
)
|
||||
elif self._if_sheet_exists == "overlay":
|
||||
wks = self.sheets[sheet_name]
|
||||
else:
|
||||
raise ValueError(
|
||||
f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
|
||||
"Valid options are 'error', 'new', 'replace' and 'overlay'."
|
||||
)
|
||||
else:
|
||||
wks = self.sheets[sheet_name]
|
||||
else:
|
||||
wks = self.book.create_sheet()
|
||||
wks.title = sheet_name
|
||||
|
||||
if validate_freeze_panes(freeze_panes):
|
||||
freeze_panes = cast(tuple[int, int], freeze_panes)
|
||||
wks.freeze_panes = wks.cell(
|
||||
row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
|
||||
)
|
||||
|
||||
for cell in cells:
|
||||
xcell = wks.cell(
|
||||
row=startrow + cell.row + 1, column=startcol + cell.col + 1
|
||||
)
|
||||
xcell.value, fmt = self._value_with_fmt(cell.val)
|
||||
if fmt:
|
||||
xcell.number_format = fmt
|
||||
|
||||
style_kwargs: dict[str, Serialisable] | None = {}
|
||||
if cell.style:
|
||||
key = str(cell.style)
|
||||
style_kwargs = _style_cache.get(key)
|
||||
if style_kwargs is None:
|
||||
style_kwargs = self._convert_to_style_kwargs(cell.style)
|
||||
_style_cache[key] = style_kwargs
|
||||
|
||||
if style_kwargs:
|
||||
for k, v in style_kwargs.items():
|
||||
setattr(xcell, k, v)
|
||||
|
||||
if cell.mergestart is not None and cell.mergeend is not None:
|
||||
wks.merge_cells(
|
||||
start_row=startrow + cell.row + 1,
|
||||
start_column=startcol + cell.col + 1,
|
||||
end_column=startcol + cell.mergeend + 1,
|
||||
end_row=startrow + cell.mergestart + 1,
|
||||
)
|
||||
|
||||
# When cells are merged only the top-left cell is preserved
|
||||
# The behaviour of the other cells in a merged range is
|
||||
# undefined
|
||||
if style_kwargs:
|
||||
first_row = startrow + cell.row + 1
|
||||
last_row = startrow + cell.mergestart + 1
|
||||
first_col = startcol + cell.col + 1
|
||||
last_col = startcol + cell.mergeend + 1
|
||||
|
||||
for row in range(first_row, last_row + 1):
|
||||
for col in range(first_col, last_col + 1):
|
||||
if row == first_row and col == first_col:
|
||||
# Ignore first cell. It is already handled.
|
||||
continue
|
||||
xcell = wks.cell(column=col, row=row)
|
||||
for k, v in style_kwargs.items():
|
||||
setattr(xcell, k, v)
|
||||
|
||||
|
||||
class OpenpyxlReader(BaseExcelReader["Workbook"]):
|
||||
@doc(storage_options=_shared_docs["storage_options"])
|
||||
def __init__(
|
||||
self,
|
||||
filepath_or_buffer: FilePath | ReadBuffer[bytes],
|
||||
storage_options: StorageOptions | None = None,
|
||||
engine_kwargs: dict | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Reader using openpyxl engine.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
filepath_or_buffer : str, path object or Workbook
|
||||
Object to be parsed.
|
||||
{storage_options}
|
||||
engine_kwargs : dict, optional
|
||||
Arbitrary keyword arguments passed to excel engine.
|
||||
"""
|
||||
import_optional_dependency("openpyxl")
|
||||
super().__init__(
|
||||
filepath_or_buffer,
|
||||
storage_options=storage_options,
|
||||
engine_kwargs=engine_kwargs,
|
||||
)
|
||||
|
||||
@property
|
||||
def _workbook_class(self) -> type[Workbook]:
|
||||
from openpyxl import Workbook
|
||||
|
||||
return Workbook
|
||||
|
||||
def load_workbook(
|
||||
self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
|
||||
) -> Workbook:
|
||||
from openpyxl import load_workbook
|
||||
|
||||
default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}
|
||||
|
||||
return load_workbook(
|
||||
filepath_or_buffer,
|
||||
**(default_kwargs | engine_kwargs),
|
||||
)
|
||||
|
||||
@property
|
||||
def sheet_names(self) -> list[str]:
|
||||
return [sheet.title for sheet in self.book.worksheets]
|
||||
|
||||
def get_sheet_by_name(self, name: str):
|
||||
self.raise_if_bad_sheet_by_name(name)
|
||||
return self.book[name]
|
||||
|
||||
def get_sheet_by_index(self, index: int):
|
||||
self.raise_if_bad_sheet_by_index(index)
|
||||
return self.book.worksheets[index]
|
||||
|
||||
def _convert_cell(self, cell) -> Scalar:
|
||||
from openpyxl.cell.cell import (
|
||||
TYPE_ERROR,
|
||||
TYPE_NUMERIC,
|
||||
)
|
||||
|
||||
if cell.value is None:
|
||||
return "" # compat with xlrd
|
||||
elif cell.data_type == TYPE_ERROR:
|
||||
return np.nan
|
||||
elif cell.data_type == TYPE_NUMERIC:
|
||||
val = int(cell.value)
|
||||
if val == cell.value:
|
||||
return val
|
||||
return float(cell.value)
|
||||
|
||||
return cell.value
|
||||
|
||||
def get_sheet_data(
|
||||
self, sheet, file_rows_needed: int | None = None
|
||||
) -> list[list[Scalar]]:
|
||||
if self.book.read_only:
|
||||
sheet.reset_dimensions()
|
||||
|
||||
data: list[list[Scalar]] = []
|
||||
last_row_with_data = -1
|
||||
for row_number, row in enumerate(sheet.rows):
|
||||
converted_row = [self._convert_cell(cell) for cell in row]
|
||||
while converted_row and converted_row[-1] == "":
|
||||
# trim trailing empty elements
|
||||
converted_row.pop()
|
||||
if converted_row:
|
||||
last_row_with_data = row_number
|
||||
data.append(converted_row)
|
||||
if file_rows_needed is not None and len(data) >= file_rows_needed:
|
||||
break
|
||||
|
||||
# Trim trailing empty rows
|
||||
data = data[: last_row_with_data + 1]
|
||||
|
||||
if len(data) > 0:
|
||||
# extend rows to max width
|
||||
max_width = max(len(data_row) for data_row in data)
|
||||
if min(len(data_row) for data_row in data) < max_width:
|
||||
empty_cell: list[Scalar] = [""]
|
||||
data = [
|
||||
data_row + (max_width - len(data_row)) * empty_cell
|
||||
for data_row in data
|
||||
]
|
||||
|
||||
return data
|
lib/python3.13/site-packages/pandas/io/excel/_pyxlsb.py (Normal file, +127)
@@ -0,0 +1,127 @@
# pyright: reportMissingImports=false
from __future__ import annotations

from typing import TYPE_CHECKING

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from pyxlsb import Workbook

    from pandas._typing import (
        FilePath,
        ReadBuffer,
        Scalar,
        StorageOptions,
    )


class PyxlsbReader(BaseExcelReader["Workbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using pyxlsb engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object, or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("pyxlsb")
        # This will call load_workbook on the filepath or buffer
        # And set the result to the book-attribute
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Workbook]:
        from pyxlsb import Workbook

        return Workbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> Workbook:
        from pyxlsb import open_workbook

        # TODO: hack in buffer capability
        # This might need some modifications to the Pyxlsb library
        # Actual work for opening it is in xlsbpackage.py, line 20-ish

        return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        return self.book.sheets

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet(name)

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        # pyxlsb sheets are indexed from 1 onwards
        # There's a fix for this in the source, but the pypi package doesn't have it
        return self.book.get_sheet(index + 1)

    def _convert_cell(self, cell) -> Scalar:
        # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
        # This means that there is no way to read datetime types from an xlsb file yet
        if cell.v is None:
            return ""  # Prevents non-named columns from not showing up as Unnamed: i
        if isinstance(cell.v, float):
            val = int(cell.v)
            if val == cell.v:
                return val
            else:
                return float(cell.v)

        return cell.v

    def get_sheet_data(
        self,
        sheet,
        file_rows_needed: int | None = None,
    ) -> list[list[Scalar]]:
        data: list[list[Scalar]] = []
        previous_row_number = -1
        # When sparse=True the rows can have different lengths and empty rows are
        # not returned. The cells are namedtuples of row, col, value (r, c, v).
        for row in sheet.rows(sparse=True):
            row_number = row[0].r
            converted_row = [self._convert_cell(cell) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                data.extend([[]] * (row_number - previous_row_number - 1))
                data.append(converted_row)
                previous_row_number = row_number
            if file_rows_needed is not None and len(data) >= file_rows_needed:
                break
        if data:
            # extend rows to max_width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]
        return data
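The _convert_cell logic above (echoed by the calamine and openpyxl readers) collapses whole-number floats to ints and maps empty cells to "". A small standalone illustration of that rule, independent of pyxlsb:

def convert(v):
    # Mirrors the reader's float handling: whole floats become ints, None becomes "".
    if v is None:
        return ""
    if isinstance(v, float):
        i = int(v)
        return i if i == v else v
    return v

assert convert(3.0) == 3 and isinstance(convert(3.0), int)
assert convert(3.5) == 3.5
assert convert(None) == ""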
lib/python3.13/site-packages/pandas/io/excel/_util.py (Normal file, +334)
@@ -0,0 +1,334 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import (
|
||||
Hashable,
|
||||
Iterable,
|
||||
MutableMapping,
|
||||
Sequence,
|
||||
)
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Literal,
|
||||
TypeVar,
|
||||
overload,
|
||||
)
|
||||
|
||||
from pandas.compat._optional import import_optional_dependency
|
||||
|
||||
from pandas.core.dtypes.common import (
|
||||
is_integer,
|
||||
is_list_like,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pandas.io.excel._base import ExcelWriter
|
||||
|
||||
ExcelWriter_t = type[ExcelWriter]
|
||||
usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])
|
||||
|
||||
_writers: MutableMapping[str, ExcelWriter_t] = {}
|
||||
|
||||
|
||||
def register_writer(klass: ExcelWriter_t) -> None:
|
||||
"""
|
||||
Add engine to the excel writer registry.io.excel.
|
||||
|
||||
You must use this method to integrate with ``to_excel``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
klass : ExcelWriter
|
||||
"""
|
||||
if not callable(klass):
|
||||
raise ValueError("Can only register callables as engines")
|
||||
engine_name = klass._engine
|
||||
_writers[engine_name] = klass
|
||||
|
||||
|
||||
def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
|
||||
"""
|
||||
Return the default reader/writer for the given extension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ext : str
|
||||
The excel file extension for which to get the default engine.
|
||||
mode : str {'reader', 'writer'}
|
||||
Whether to get the default engine for reading or writing.
|
||||
Either 'reader' or 'writer'
|
||||
|
||||
Returns
|
||||
-------
|
||||
str
|
||||
The default engine for the extension.
|
||||
"""
|
||||
_default_readers = {
|
||||
"xlsx": "openpyxl",
|
||||
"xlsm": "openpyxl",
|
||||
"xlsb": "pyxlsb",
|
||||
"xls": "xlrd",
|
||||
"ods": "odf",
|
||||
}
|
||||
_default_writers = {
|
||||
"xlsx": "openpyxl",
|
||||
"xlsm": "openpyxl",
|
||||
"xlsb": "pyxlsb",
|
||||
"ods": "odf",
|
||||
}
|
||||
assert mode in ["reader", "writer"]
|
||||
if mode == "writer":
|
||||
# Prefer xlsxwriter over openpyxl if installed
|
||||
xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
|
||||
if xlsxwriter:
|
||||
_default_writers["xlsx"] = "xlsxwriter"
|
||||
return _default_writers[ext]
|
||||
else:
|
||||
return _default_readers[ext]
|
||||
|
||||
|
||||
def get_writer(engine_name: str) -> ExcelWriter_t:
|
||||
try:
|
||||
return _writers[engine_name]
|
||||
except KeyError as err:
|
||||
raise ValueError(f"No Excel writer '{engine_name}'") from err
|
||||
|
||||
|
||||
def _excel2num(x: str) -> int:
|
||||
"""
|
||||
Convert Excel column name like 'AB' to 0-based column index.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : str
|
||||
The Excel column name to convert to a 0-based column index.
|
||||
|
||||
Returns
|
||||
-------
|
||||
num : int
|
||||
The column index corresponding to the name.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
Part of the Excel column name was invalid.
|
||||
"""
|
||||
index = 0
|
||||
|
||||
for c in x.upper().strip():
|
||||
cp = ord(c)
|
||||
|
||||
if cp < ord("A") or cp > ord("Z"):
|
||||
raise ValueError(f"Invalid column name: {x}")
|
||||
|
||||
index = index * 26 + cp - ord("A") + 1
|
||||
|
||||
return index - 1
|
||||
|
||||
|
||||
def _range2cols(areas: str) -> list[int]:
|
||||
"""
|
||||
Convert comma separated list of column names and ranges to indices.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
areas : str
|
||||
A string containing a sequence of column ranges (or areas).
|
||||
|
||||
Returns
|
||||
-------
|
||||
cols : list
|
||||
A list of 0-based column indices.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> _range2cols('A:E')
|
||||
[0, 1, 2, 3, 4]
|
||||
>>> _range2cols('A,C,Z:AB')
|
||||
[0, 2, 25, 26, 27]
|
||||
"""
|
||||
cols: list[int] = []
|
||||
|
||||
for rng in areas.split(","):
|
||||
if ":" in rng:
|
||||
rngs = rng.split(":")
|
||||
cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
|
||||
else:
|
||||
cols.append(_excel2num(rng))
|
||||
|
||||
return cols
|
||||
|
||||
|
||||
@overload
|
||||
def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
|
||||
...
|
||||
|
||||
|
||||
@overload
|
||||
def maybe_convert_usecols(usecols: list[str]) -> list[str]:
|
||||
...
|
||||
|
||||
|
||||
@overload
|
||||
def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
|
||||
...
|
||||
|
||||
|
||||
@overload
|
||||
def maybe_convert_usecols(usecols: None) -> None:
|
||||
...
|
||||
|
||||
|
||||
def maybe_convert_usecols(
|
||||
usecols: str | list[int] | list[str] | usecols_func | None,
|
||||
) -> None | list[int] | list[str] | usecols_func:
|
||||
"""
|
||||
Convert `usecols` into a compatible format for parsing in `parsers.py`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
usecols : object
|
||||
The use-columns object to potentially convert.
|
||||
|
||||
Returns
|
||||
-------
|
||||
converted : object
|
||||
The compatible format of `usecols`.
|
||||
"""
|
||||
if usecols is None:
|
||||
return usecols
|
||||
|
||||
if is_integer(usecols):
|
||||
raise ValueError(
|
||||
"Passing an integer for `usecols` is no longer supported. "
|
||||
"Please pass in a list of int from 0 to `usecols` inclusive instead."
|
||||
)
|
||||
|
||||
if isinstance(usecols, str):
|
||||
return _range2cols(usecols)
|
||||
|
||||
return usecols
|
||||
|
||||
|
||||
@overload
|
||||
def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
|
||||
...
|
||||
|
||||
|
||||
@overload
|
||||
def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
|
||||
...
|
||||
|
||||
|
||||
def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
|
||||
if freeze_panes is not None:
|
||||
if len(freeze_panes) == 2 and all(
|
||||
isinstance(item, int) for item in freeze_panes
|
||||
):
|
||||
return True
|
||||
|
||||
raise ValueError(
|
||||
"freeze_panes must be of form (row, column) "
|
||||
"where row and column are integers"
|
||||
)
|
||||
|
||||
# freeze_panes wasn't specified, return False so it won't be applied
|
||||
# to output sheet
|
||||
return False
|
||||
|
||||
|
||||
def fill_mi_header(
|
||||
row: list[Hashable], control_row: list[bool]
|
||||
) -> tuple[list[Hashable], list[bool]]:
|
||||
"""
|
||||
Forward fill blank entries in row but only inside the same parent index.
|
||||
|
||||
Used for creating headers in Multiindex.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
row : list
|
||||
List of items in a single row.
|
||||
control_row : list of bool
|
||||
Helps to determine if particular column is in same parent index as the
|
||||
previous value. Used to stop propagation of empty cells between
|
||||
different indexes.
|
||||
|
||||
Returns
|
||||
-------
|
||||
Returns changed row and control_row
|
||||
"""
|
||||
last = row[0]
|
||||
for i in range(1, len(row)):
|
||||
if not control_row[i]:
|
||||
last = row[i]
|
||||
|
||||
if row[i] == "" or row[i] is None:
|
||||
row[i] = last
|
||||
else:
|
||||
control_row[i] = False
|
||||
last = row[i]
|
||||
|
||||
return row, control_row
|
||||
|
||||
|
||||
def pop_header_name(
|
||||
row: list[Hashable], index_col: int | Sequence[int]
|
||||
) -> tuple[Hashable | None, list[Hashable]]:
|
||||
"""
|
||||
Pop the header name for MultiIndex parsing.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
row : list
|
||||
The data row to parse for the header name.
|
||||
index_col : int, list
|
||||
The index columns for our data. Assumed to be non-null.
|
||||
|
||||
Returns
|
||||
-------
|
||||
header_name : str
|
||||
The extracted header name.
|
||||
trimmed_row : list
|
||||
The original data row with the header name removed.
|
||||
"""
|
||||
# Pop out header name and fill w/blank.
|
||||
if is_list_like(index_col):
|
||||
assert isinstance(index_col, Iterable)
|
||||
i = max(index_col)
|
||||
else:
|
||||
assert not isinstance(index_col, Iterable)
|
||||
i = index_col
|
||||
|
||||
header_name = row[i]
|
||||
header_name = None if header_name == "" else header_name
|
||||
|
||||
return header_name, row[:i] + [""] + row[i + 1 :]
|
||||
|
||||
|
||||
def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
|
||||
"""
|
||||
Used to combine two sources of kwargs for the backend engine.
|
||||
|
||||
Use of kwargs is deprecated, this function is solely for use in 1.3 and should
|
||||
be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
|
||||
or kwargs must be None or empty respectively.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
engine_kwargs: dict
|
||||
kwargs to be passed through to the engine.
|
||||
kwargs: dict
|
||||
kwargs to be passed through to the engine (deprecated)
|
||||
|
||||
Returns
|
||||
-------
|
||||
engine_kwargs combined with kwargs
|
||||
"""
|
||||
if engine_kwargs is None:
|
||||
result = {}
|
||||
else:
|
||||
result = engine_kwargs.copy()
|
||||
result.update(kwargs)
|
||||
return result
|
lib/python3.13/site-packages/pandas/io/excel/_xlrd.py (Normal file, +143)
@ -0,0 +1,143 @@
from __future__ import annotations

from datetime import time
import math
from typing import TYPE_CHECKING

import numpy as np

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from xlrd import Book

    from pandas._typing import (
        Scalar,
        StorageOptions,
    )


class XlrdReader(BaseExcelReader["Book"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using xlrd engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
        import_optional_dependency("xlrd", extra=err_msg)
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Book]:
        from xlrd import Book

        return Book

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
        from xlrd import open_workbook

        if hasattr(filepath_or_buffer, "read"):
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data, **engine_kwargs)
        else:
            return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self):
        return self.book.sheet_names()

    def get_sheet_by_name(self, name):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.sheet_by_name(name)

    def get_sheet_by_index(self, index):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.sheet_by_index(index)

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        from xlrd import (
            XL_CELL_BOOLEAN,
            XL_CELL_DATE,
            XL_CELL_ERROR,
            XL_CELL_NUMBER,
            xldate,
        )

        epoch1904 = self.book.datemode

        def _parse_cell(cell_contents, cell_typ):
            """
            converts the contents of the cell into a pandas appropriate object
            """
            if cell_typ == XL_CELL_DATE:
                # Use the newer xlrd datetime handling.
                try:
                    cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
                except OverflowError:
                    return cell_contents

                # Excel doesn't distinguish between dates and time,
                # so we treat dates on the epoch as times only.
                # Also, Excel supports 1900 and 1904 epochs.
                year = (cell_contents.timetuple())[0:3]
                if (not epoch1904 and year == (1899, 12, 31)) or (
                    epoch1904 and year == (1904, 1, 1)
                ):
                    cell_contents = time(
                        cell_contents.hour,
                        cell_contents.minute,
                        cell_contents.second,
                        cell_contents.microsecond,
                    )

            elif cell_typ == XL_CELL_ERROR:
                cell_contents = np.nan
            elif cell_typ == XL_CELL_BOOLEAN:
                cell_contents = bool(cell_contents)
            elif cell_typ == XL_CELL_NUMBER:
                # GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
                if math.isfinite(cell_contents):
                    # GH54564 - don't attempt to convert NaN/Inf
                    val = int(cell_contents)
                    if val == cell_contents:
                        cell_contents = val
            return cell_contents

        data = []

        nrows = sheet.nrows
        if file_rows_needed is not None:
            nrows = min(nrows, file_rows_needed)
        for i in range(nrows):
            row = [
                _parse_cell(value, typ)
                for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
            ]
            data.append(row)

        return data

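A short usage note (added for orientation; not part of the vendored file): XlrdReader is normally reached through the public pandas API rather than instantiated directly. A minimal sketch, assuming xlrd >= 2.0.1 is installed and a legacy workbook named example.xls exists:

import pandas as pd

# read_excel dispatches the legacy .xls file to the xlrd engine shown above
df = pd.read_excel("example.xls", engine="xlrd")
print(df.head())
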
284
lib/python3.13/site-packages/pandas/io/excel/_xlsxwriter.py
Normal file
@ -0,0 +1,284 @@
from __future__ import annotations

import json
from typing import (
    TYPE_CHECKING,
    Any,
)

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        StorageOptions,
        WriteExcelBuffer,
    )


class _XlsxStyler:
    # Map from openpyxl-oriented styles to flatter xlsxwriter representation
    # Ordering necessary for both determinism and because some are keyed by
    # prefixes of others.
    STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
        "font": [
            (("name",), "font_name"),
            (("sz",), "font_size"),
            (("size",), "font_size"),
            (("color", "rgb"), "font_color"),
            (("color",), "font_color"),
            (("b",), "bold"),
            (("bold",), "bold"),
            (("i",), "italic"),
            (("italic",), "italic"),
            (("u",), "underline"),
            (("underline",), "underline"),
            (("strike",), "font_strikeout"),
            (("vertAlign",), "font_script"),
            (("vertalign",), "font_script"),
        ],
        "number_format": [(("format_code",), "num_format"), ((), "num_format")],
        "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
        "alignment": [
            (("horizontal",), "align"),
            (("vertical",), "valign"),
            (("text_rotation",), "rotation"),
            (("wrap_text",), "text_wrap"),
            (("indent",), "indent"),
            (("shrink_to_fit",), "shrink"),
        ],
        "fill": [
            (("patternType",), "pattern"),
            (("patterntype",), "pattern"),
            (("fill_type",), "pattern"),
            (("start_color", "rgb"), "fg_color"),
            (("fgColor", "rgb"), "fg_color"),
            (("fgcolor", "rgb"), "fg_color"),
            (("start_color",), "fg_color"),
            (("fgColor",), "fg_color"),
            (("fgcolor",), "fg_color"),
            (("end_color", "rgb"), "bg_color"),
            (("bgColor", "rgb"), "bg_color"),
            (("bgcolor", "rgb"), "bg_color"),
            (("end_color",), "bg_color"),
            (("bgColor",), "bg_color"),
            (("bgcolor",), "bg_color"),
        ],
        "border": [
            (("color", "rgb"), "border_color"),
            (("color",), "border_color"),
            (("style",), "border"),
            (("top", "color", "rgb"), "top_color"),
            (("top", "color"), "top_color"),
            (("top", "style"), "top"),
            (("top",), "top"),
            (("right", "color", "rgb"), "right_color"),
            (("right", "color"), "right_color"),
            (("right", "style"), "right"),
            (("right",), "right"),
            (("bottom", "color", "rgb"), "bottom_color"),
            (("bottom", "color"), "bottom_color"),
            (("bottom", "style"), "bottom"),
            (("bottom",), "bottom"),
            (("left", "color", "rgb"), "left_color"),
            (("left", "color"), "left_color"),
            (("left", "style"), "left"),
            (("left",), "left"),
        ],
    }

    @classmethod
    def convert(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlsxwriter format dict

        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        # Create a XlsxWriter format object.
        props = {}

        if num_format_str is not None:
            props["num_format"] = num_format_str

        if style_dict is None:
            return props

        if "borders" in style_dict:
            style_dict = style_dict.copy()
            style_dict["border"] = style_dict.pop("borders")

        for style_group_key, style_group in style_dict.items():
            for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
                # src is a sequence of keys into a nested dict
                # dst is a flat key
                if dst in props:
                    continue
                v = style_group
                for k in src:
                    try:
                        v = v[k]
                    except (KeyError, TypeError):
                        break
                else:
                    props[dst] = v

        if isinstance(props.get("pattern"), str):
            # TODO: support other fill patterns
            props["pattern"] = 0 if props["pattern"] == "none" else 1

        for k in ["border", "top", "right", "bottom", "left"]:
            if isinstance(props.get(k), str):
                try:
                    props[k] = [
                        "none",
                        "thin",
                        "medium",
                        "dashed",
                        "dotted",
                        "thick",
                        "double",
                        "hair",
                        "mediumDashed",
                        "dashDot",
                        "mediumDashDot",
                        "dashDotDot",
                        "mediumDashDotDot",
                        "slantDashDot",
                    ].index(props[k])
                except ValueError:
                    props[k] = 2

        if isinstance(props.get("font_script"), str):
            props["font_script"] = ["baseline", "superscript", "subscript"].index(
                props["font_script"]
            )

        if isinstance(props.get("underline"), str):
            props["underline"] = {
                "none": 0,
                "single": 1,
                "double": 2,
                "singleAccounting": 33,
                "doubleAccounting": 34,
            }[props["underline"]]

        # GH 30107 - xlsxwriter uses different name
        if props.get("valign") == "center":
            props["valign"] = "vcenter"

        return props

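# --- Added illustration (not part of the pandas source; a minimal sketch) ---
# _XlsxStyler.convert flattens an openpyxl-style nested style dict into the
# keyword form expected by xlsxwriter's add_format; the input below is made-up:
#
#     _XlsxStyler.convert({"font": {"bold": True, "color": "FF0000"}}, "0.00")
#     # -> {"num_format": "0.00", "font_color": "FF0000", "bold": True}
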
class XlsxWriter(ExcelWriter):
    _engine = "xlsxwriter"
    _supported_extensions = (".xlsx",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        try:
            self._book = Workbook(self._handles.handle, **engine_kwargs)
        except TypeError:
            self._handles.handle.close()
            raise

    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        result = self.book.sheetnames
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.close()

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)

        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)

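A short usage note (added for orientation; not part of the vendored file): XlsxWriter is registered as an ExcelWriter engine for .xlsx output, so it is usually driven through the public pandas API rather than constructed directly. A minimal sketch, assuming the xlsxwriter package is installed; the file name and data are made-up:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.5, 4.5]})
# freeze_panes flows through validate_freeze_panes and _write_cells above
with pd.ExcelWriter("report.xlsx", engine="xlsxwriter") as writer:
    df.to_excel(writer, sheet_name="Sheet1", freeze_panes=(1, 0))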