Updated script so that it can be controlled by the Node.js web app
This commit is contained in:
@ -0,0 +1,60 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
from . import accessibility
|
||||
from . import animation
|
||||
from . import audits
|
||||
from . import autofill
|
||||
from . import background_service
|
||||
from . import bluetooth_emulation
|
||||
from . import browser
|
||||
from . import css
|
||||
from . import cache_storage
|
||||
from . import cast
|
||||
from . import console
|
||||
from . import dom
|
||||
from . import dom_debugger
|
||||
from . import dom_snapshot
|
||||
from . import dom_storage
|
||||
from . import database
|
||||
from . import debugger
|
||||
from . import device_access
|
||||
from . import device_orientation
|
||||
from . import emulation
|
||||
from . import event_breakpoints
|
||||
from . import extensions
|
||||
from . import fed_cm
|
||||
from . import fetch
|
||||
from . import file_system
|
||||
from . import headless_experimental
|
||||
from . import heap_profiler
|
||||
from . import io
|
||||
from . import indexed_db
|
||||
from . import input_
|
||||
from . import inspector
|
||||
from . import layer_tree
|
||||
from . import log
|
||||
from . import media
|
||||
from . import memory
|
||||
from . import network
|
||||
from . import overlay
|
||||
from . import pwa
|
||||
from . import page
|
||||
from . import performance
|
||||
from . import performance_timeline
|
||||
from . import preload
|
||||
from . import profiler
|
||||
from . import runtime
|
||||
from . import schema
|
||||
from . import security
|
||||
from . import service_worker
|
||||
from . import storage
|
||||
from . import system_info
|
||||
from . import target
|
||||
from . import tethering
|
||||
from . import tracing
|
||||
from . import web_audio
|
||||
from . import web_authn
|
||||
from . import util
|
||||
|
@ -0,0 +1,647 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Accessibility (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import page
|
||||
from . import runtime
|
||||
|
||||
|
||||
class AXNodeId(str):
    '''
    Unique accessibility node identifier (a plain string on the wire).
    '''

    def __repr__(self):
        return 'AXNodeId({})'.format(super().__repr__())

    def to_json(self) -> str:
        '''Return the wire-format value (the string itself).'''
        return self

    @classmethod
    def from_json(cls, json: str) -> AXNodeId:
        '''Wrap a raw CDP string in an ``AXNodeId``.'''
        return cls(json)
|
||||
|
||||
|
||||
class AXValueType(enum.Enum):
    '''
    Enum of possible property types.
    '''
    BOOLEAN = "boolean"
    TRISTATE = "tristate"
    BOOLEAN_OR_UNDEFINED = "booleanOrUndefined"
    IDREF = "idref"
    IDREF_LIST = "idrefList"
    INTEGER = "integer"
    NODE = "node"
    NODE_LIST = "nodeList"
    NUMBER = "number"
    STRING = "string"
    COMPUTED_STRING = "computedString"
    TOKEN = "token"
    TOKEN_LIST = "tokenList"
    DOM_RELATION = "domRelation"
    ROLE = "role"
    INTERNAL_ROLE = "internalRole"
    VALUE_UNDEFINED = "valueUndefined"

    def to_json(self):
        '''Return the wire-format string for this enum member.'''
        return self.value

    @classmethod
    def from_json(cls, json):
        '''Return the enum member whose value equals the wire string ``json``.'''
        return cls(json)
|
||||
|
||||
|
||||
class AXValueSourceType(enum.Enum):
    '''
    Enum of possible property sources.
    '''
    ATTRIBUTE = "attribute"
    IMPLICIT = "implicit"
    STYLE = "style"
    CONTENTS = "contents"
    PLACEHOLDER = "placeholder"
    RELATED_ELEMENT = "relatedElement"

    def to_json(self):
        '''Return the wire-format string for this enum member.'''
        return self.value

    @classmethod
    def from_json(cls, json):
        '''Return the enum member whose value equals the wire string ``json``.'''
        return cls(json)
|
||||
|
||||
|
||||
class AXValueNativeSourceType(enum.Enum):
    '''
    Enum of possible native property sources (as a subtype of a particular AXValueSourceType).
    '''
    DESCRIPTION = "description"
    FIGCAPTION = "figcaption"
    LABEL = "label"
    LABELFOR = "labelfor"
    LABELWRAPPED = "labelwrapped"
    LEGEND = "legend"
    RUBYANNOTATION = "rubyannotation"
    TABLECAPTION = "tablecaption"
    TITLE = "title"
    OTHER = "other"

    def to_json(self):
        '''Return the wire-format string for this enum member.'''
        return self.value

    @classmethod
    def from_json(cls, json):
        '''Return the enum member whose value equals the wire string ``json``.'''
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class AXValueSource:
    '''
    A single source for a computed AX property.
    '''
    #: What type of source this is.
    type_: AXValueSourceType

    #: The value of this property source.
    value: typing.Optional[AXValue] = None

    #: The name of the relevant attribute, if any.
    attribute: typing.Optional[str] = None

    #: The value of the relevant attribute, if any.
    attribute_value: typing.Optional[AXValue] = None

    #: Whether this source is superseded by a higher priority source.
    superseded: typing.Optional[bool] = None

    #: The native markup source for this value, e.g. a ``<label>`` element.
    native_source: typing.Optional[AXValueNativeSourceType] = None

    #: The value, such as a node or node list, of the native source.
    native_source_value: typing.Optional[AXValue] = None

    #: Whether the value for this property is invalid.
    invalid: typing.Optional[bool] = None

    #: Reason for the value being invalid, if it is.
    invalid_reason: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; fields that are None are omitted.'''
        json = dict()
        json['type'] = self.type_.to_json()
        if self.value is not None:
            json['value'] = self.value.to_json()
        if self.attribute is not None:
            json['attribute'] = self.attribute
        if self.attribute_value is not None:
            json['attributeValue'] = self.attribute_value.to_json()
        if self.superseded is not None:
            json['superseded'] = self.superseded
        if self.native_source is not None:
            json['nativeSource'] = self.native_source.to_json()
        if self.native_source_value is not None:
            json['nativeSourceValue'] = self.native_source_value.to_json()
        if self.invalid is not None:
            json['invalid'] = self.invalid
        if self.invalid_reason is not None:
            json['invalidReason'] = self.invalid_reason
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            type_=AXValueSourceType.from_json(json['type']),
            value=AXValue.from_json(json['value']) if 'value' in json else None,
            attribute=str(json['attribute']) if 'attribute' in json else None,
            attribute_value=AXValue.from_json(json['attributeValue']) if 'attributeValue' in json else None,
            superseded=bool(json['superseded']) if 'superseded' in json else None,
            native_source=AXValueNativeSourceType.from_json(json['nativeSource']) if 'nativeSource' in json else None,
            native_source_value=AXValue.from_json(json['nativeSourceValue']) if 'nativeSourceValue' in json else None,
            invalid=bool(json['invalid']) if 'invalid' in json else None,
            invalid_reason=str(json['invalidReason']) if 'invalidReason' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class AXRelatedNode:
    '''
    A DOM node related to an accessibility value, referenced by its backend node id.
    '''
    #: The BackendNodeId of the related DOM node.
    backend_dom_node_id: dom.BackendNodeId

    #: The IDRef value provided, if any.
    idref: typing.Optional[str] = None

    #: The text alternative of this node in the current context.
    text: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; fields that are None are omitted.'''
        json = dict()
        json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
        if self.idref is not None:
            json['idref'] = self.idref
        if self.text is not None:
            json['text'] = self.text
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']),
            idref=str(json['idref']) if 'idref' in json else None,
            text=str(json['text']) if 'text' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class AXProperty:
    '''
    A single name/value pair describing one accessibility property.
    '''
    #: The name of this property.
    name: AXPropertyName

    #: The value of this property.
    value: AXValue

    def to_json(self):
        '''Serialize to a CDP JSON dict (both fields are required).'''
        json = dict()
        json['name'] = self.name.to_json()
        json['value'] = self.value.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict.'''
        return cls(
            name=AXPropertyName.from_json(json['name']),
            value=AXValue.from_json(json['value']),
        )
|
||||
|
||||
|
||||
@dataclass
class AXValue:
    '''
    A single computed AX property.
    '''
    #: The type of this value.
    type_: AXValueType

    #: The computed value of this property.
    value: typing.Optional[typing.Any] = None

    #: One or more related nodes, if applicable.
    related_nodes: typing.Optional[typing.List[AXRelatedNode]] = None

    #: The sources which contributed to the computation of this property.
    sources: typing.Optional[typing.List[AXValueSource]] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; fields that are None are omitted.'''
        json = dict()
        json['type'] = self.type_.to_json()
        if self.value is not None:
            # ``value`` is passed through untyped (typing.Any on the wire).
            json['value'] = self.value
        if self.related_nodes is not None:
            json['relatedNodes'] = [i.to_json() for i in self.related_nodes]
        if self.sources is not None:
            json['sources'] = [i.to_json() for i in self.sources]
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            type_=AXValueType.from_json(json['type']),
            value=json['value'] if 'value' in json else None,
            related_nodes=[AXRelatedNode.from_json(i) for i in json['relatedNodes']] if 'relatedNodes' in json else None,
            sources=[AXValueSource.from_json(i) for i in json['sources']] if 'sources' in json else None,
        )
|
||||
|
||||
|
||||
class AXPropertyName(enum.Enum):
    '''
    Values of AXProperty name:
    - from 'busy' to 'roledescription': states which apply to every AX node
    - from 'live' to 'root': attributes which apply to nodes in live regions
    - from 'autocomplete' to 'valuetext': attributes which apply to widgets
    - from 'checked' to 'selected': states which apply to widgets
    - from 'activedescendant' to 'owns' - relationships between elements other than parent/child/sibling.
    '''
    BUSY = "busy"
    DISABLED = "disabled"
    EDITABLE = "editable"
    FOCUSABLE = "focusable"
    FOCUSED = "focused"
    HIDDEN = "hidden"
    HIDDEN_ROOT = "hiddenRoot"
    INVALID = "invalid"
    KEYSHORTCUTS = "keyshortcuts"
    SETTABLE = "settable"
    ROLEDESCRIPTION = "roledescription"
    LIVE = "live"
    ATOMIC = "atomic"
    RELEVANT = "relevant"
    ROOT = "root"
    AUTOCOMPLETE = "autocomplete"
    HAS_POPUP = "hasPopup"
    LEVEL = "level"
    MULTISELECTABLE = "multiselectable"
    ORIENTATION = "orientation"
    MULTILINE = "multiline"
    READONLY = "readonly"
    REQUIRED = "required"
    VALUEMIN = "valuemin"
    VALUEMAX = "valuemax"
    VALUETEXT = "valuetext"
    CHECKED = "checked"
    EXPANDED = "expanded"
    MODAL = "modal"
    PRESSED = "pressed"
    SELECTED = "selected"
    ACTIVEDESCENDANT = "activedescendant"
    CONTROLS = "controls"
    DESCRIBEDBY = "describedby"
    DETAILS = "details"
    ERRORMESSAGE = "errormessage"
    FLOWTO = "flowto"
    LABELLEDBY = "labelledby"
    OWNS = "owns"
    URL = "url"

    def to_json(self):
        '''Return the wire-format string for this enum member.'''
        return self.value

    @classmethod
    def from_json(cls, json):
        '''Return the enum member whose value equals the wire string ``json``.'''
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class AXNode:
    '''
    A node in the accessibility tree.
    '''
    #: Unique identifier for this node.
    node_id: AXNodeId

    #: Whether this node is ignored for accessibility
    ignored: bool

    #: Collection of reasons why this node is hidden.
    ignored_reasons: typing.Optional[typing.List[AXProperty]] = None

    #: This ``Node``'s role, whether explicit or implicit.
    role: typing.Optional[AXValue] = None

    #: This ``Node``'s Chrome raw role.
    chrome_role: typing.Optional[AXValue] = None

    #: The accessible name for this ``Node``.
    name: typing.Optional[AXValue] = None

    #: The accessible description for this ``Node``.
    description: typing.Optional[AXValue] = None

    #: The value for this ``Node``.
    value: typing.Optional[AXValue] = None

    #: All other properties
    properties: typing.Optional[typing.List[AXProperty]] = None

    #: ID for this node's parent.
    parent_id: typing.Optional[AXNodeId] = None

    #: IDs for each of this node's child nodes.
    child_ids: typing.Optional[typing.List[AXNodeId]] = None

    #: The backend ID for the associated DOM node, if any.
    backend_dom_node_id: typing.Optional[dom.BackendNodeId] = None

    #: The frame ID for the frame associated with this nodes document.
    frame_id: typing.Optional[page.FrameId] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when None.'''
        json = dict()
        json['nodeId'] = self.node_id.to_json()
        json['ignored'] = self.ignored
        if self.ignored_reasons is not None:
            json['ignoredReasons'] = [i.to_json() for i in self.ignored_reasons]
        if self.role is not None:
            json['role'] = self.role.to_json()
        if self.chrome_role is not None:
            json['chromeRole'] = self.chrome_role.to_json()
        if self.name is not None:
            json['name'] = self.name.to_json()
        if self.description is not None:
            json['description'] = self.description.to_json()
        if self.value is not None:
            json['value'] = self.value.to_json()
        if self.properties is not None:
            json['properties'] = [i.to_json() for i in self.properties]
        if self.parent_id is not None:
            json['parentId'] = self.parent_id.to_json()
        if self.child_ids is not None:
            json['childIds'] = [i.to_json() for i in self.child_ids]
        if self.backend_dom_node_id is not None:
            json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
        if self.frame_id is not None:
            json['frameId'] = self.frame_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            node_id=AXNodeId.from_json(json['nodeId']),
            ignored=bool(json['ignored']),
            ignored_reasons=[AXProperty.from_json(i) for i in json['ignoredReasons']] if 'ignoredReasons' in json else None,
            role=AXValue.from_json(json['role']) if 'role' in json else None,
            chrome_role=AXValue.from_json(json['chromeRole']) if 'chromeRole' in json else None,
            name=AXValue.from_json(json['name']) if 'name' in json else None,
            description=AXValue.from_json(json['description']) if 'description' in json else None,
            value=AXValue.from_json(json['value']) if 'value' in json else None,
            properties=[AXProperty.from_json(i) for i in json['properties']] if 'properties' in json else None,
            parent_id=AXNodeId.from_json(json['parentId']) if 'parentId' in json else None,
            child_ids=[AXNodeId.from_json(i) for i in json['childIds']] if 'childIds' in json else None,
            backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']) if 'backendDOMNodeId' in json else None,
            frame_id=page.FrameId.from_json(json['frameId']) if 'frameId' in json else None,
        )
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables the accessibility domain.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Accessibility.disable',
    }
    # This command has no result payload, so the response is not bound
    # (the original generator assigned it to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables the accessibility domain which causes ``AXNodeId``'s to remain consistent between method calls.
    This turns on accessibility for the page, which can impact performance until accessibility is disabled.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Accessibility.enable',
    }
    # This command has no result payload, so the response is not bound
    # (the original generator assigned it to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def get_partial_ax_tree(
        node_id: typing.Optional[dom.NodeId] = None,
        backend_node_id: typing.Optional[dom.BackendNodeId] = None,
        object_id: typing.Optional[runtime.RemoteObjectId] = None,
        fetch_relatives: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
    '''
    Fetches the accessibility node and partial accessibility tree for this DOM node, if it exists.

    **EXPERIMENTAL**

    :param node_id: *(Optional)* Identifier of the node to get the partial accessibility tree for.
    :param backend_node_id: *(Optional)* Identifier of the backend node to get the partial accessibility tree for.
    :param object_id: *(Optional)* JavaScript object id of the node wrapper to get the partial accessibility tree for.
    :param fetch_relatives: *(Optional)* Whether to fetch this node's ancestors, siblings and children. Defaults to true.
    :returns: The ``Accessibility.AXNode`` for this DOM node, if it exists, plus its ancestors, siblings and children, if requested.
    '''
    params: T_JSON_DICT = {}
    # Exactly one of the three node selectors is expected; encode whichever
    # were supplied, preserving the wire key order of the spec.
    for key, ident in (
        ('nodeId', node_id),
        ('backendNodeId', backend_node_id),
        ('objectId', object_id),
    ):
        if ident is not None:
            params[key] = ident.to_json()
    if fetch_relatives is not None:
        params['fetchRelatives'] = fetch_relatives
    response = yield {
        'method': 'Accessibility.getPartialAXTree',
        'params': params,
    }
    return [AXNode.from_json(node) for node in response['nodes']]
|
||||
|
||||
|
||||
def get_full_ax_tree(
        depth: typing.Optional[int] = None,
        frame_id: typing.Optional[page.FrameId] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
    '''
    Fetches the entire accessibility tree for the root Document

    **EXPERIMENTAL**

    :param depth: *(Optional)* The maximum depth at which descendants of the root node should be retrieved. If omitted, the full tree is returned.
    :param frame_id: *(Optional)* The frame for whose document the AX tree should be retrieved. If omitted, the root frame is used.
    :returns:
    '''
    params: T_JSON_DICT = {}
    if depth is not None:
        params['depth'] = depth
    if frame_id is not None:
        params['frameId'] = frame_id.to_json()
    response = yield {
        'method': 'Accessibility.getFullAXTree',
        'params': params,
    }
    return [AXNode.from_json(node) for node in response['nodes']]
|
||||
|
||||
|
||||
def get_root_ax_node(
        frame_id: typing.Optional[page.FrameId] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,AXNode]:
    '''
    Fetches the root node.
    Requires ``enable()`` to have been called previously.

    **EXPERIMENTAL**

    :param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used.
    :returns:
    '''
    params: T_JSON_DICT = {}
    if frame_id is not None:
        params['frameId'] = frame_id.to_json()
    response = yield {
        'method': 'Accessibility.getRootAXNode',
        'params': params,
    }
    return AXNode.from_json(response['node'])
|
||||
|
||||
|
||||
def get_ax_node_and_ancestors(
        node_id: typing.Optional[dom.NodeId] = None,
        backend_node_id: typing.Optional[dom.BackendNodeId] = None,
        object_id: typing.Optional[runtime.RemoteObjectId] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
    '''
    Fetches a node and all ancestors up to and including the root.
    Requires ``enable()`` to have been called previously.

    **EXPERIMENTAL**

    :param node_id: *(Optional)* Identifier of the node to get.
    :param backend_node_id: *(Optional)* Identifier of the backend node to get.
    :param object_id: *(Optional)* JavaScript object id of the node wrapper to get.
    :returns:
    '''
    params: T_JSON_DICT = {}
    # Encode whichever node selectors were supplied, in spec wire order.
    for key, ident in (
        ('nodeId', node_id),
        ('backendNodeId', backend_node_id),
        ('objectId', object_id),
    ):
        if ident is not None:
            params[key] = ident.to_json()
    response = yield {
        'method': 'Accessibility.getAXNodeAndAncestors',
        'params': params,
    }
    return [AXNode.from_json(node) for node in response['nodes']]
|
||||
|
||||
|
||||
def get_child_ax_nodes(
        id_: AXNodeId,
        frame_id: typing.Optional[page.FrameId] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
    '''
    Fetches a particular accessibility node by AXNodeId.
    Requires ``enable()`` to have been called previously.

    **EXPERIMENTAL**

    :param id_:
    :param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used.
    :returns:
    '''
    params: T_JSON_DICT = {'id': id_.to_json()}
    if frame_id is not None:
        params['frameId'] = frame_id.to_json()
    response = yield {
        'method': 'Accessibility.getChildAXNodes',
        'params': params,
    }
    return [AXNode.from_json(node) for node in response['nodes']]
|
||||
|
||||
|
||||
def query_ax_tree(
        node_id: typing.Optional[dom.NodeId] = None,
        backend_node_id: typing.Optional[dom.BackendNodeId] = None,
        object_id: typing.Optional[runtime.RemoteObjectId] = None,
        accessible_name: typing.Optional[str] = None,
        role: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
    '''
    Query a DOM node's accessibility subtree for accessible name and role.
    This command computes the name and role for all nodes in the subtree, including those that are
    ignored for accessibility, and returns those that match the specified name and role. If no DOM
    node is specified, or the DOM node does not exist, the command returns an error. If neither
    ``accessibleName`` or ``role`` is specified, it returns all the accessibility nodes in the subtree.

    **EXPERIMENTAL**

    :param node_id: *(Optional)* Identifier of the node for the root to query.
    :param backend_node_id: *(Optional)* Identifier of the backend node for the root to query.
    :param object_id: *(Optional)* JavaScript object id of the node wrapper for the root to query.
    :param accessible_name: *(Optional)* Find nodes with this computed name.
    :param role: *(Optional)* Find nodes with this computed role.
    :returns: A list of ``Accessibility.AXNode`` matching the specified attributes, including nodes that are ignored for accessibility.
    '''
    params: T_JSON_DICT = {}
    # Node selectors need JSON encoding; the filter strings go over as-is.
    for key, ident in (
        ('nodeId', node_id),
        ('backendNodeId', backend_node_id),
        ('objectId', object_id),
    ):
        if ident is not None:
            params[key] = ident.to_json()
    if accessible_name is not None:
        params['accessibleName'] = accessible_name
    if role is not None:
        params['role'] = role
    response = yield {
        'method': 'Accessibility.queryAXTree',
        'params': params,
    }
    return [AXNode.from_json(node) for node in response['nodes']]
|
||||
|
||||
|
||||
@event_class('Accessibility.loadComplete')
@dataclass
class LoadComplete:
    '''
    **EXPERIMENTAL**

    The loadComplete event mirrors the load complete event sent by the browser to assistive
    technology when the web page has finished loading.
    '''
    #: New document root node.
    root: AXNode

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> LoadComplete:
        '''Deserialize the event payload from a CDP JSON dict.'''
        return cls(
            root=AXNode.from_json(json['root'])
        )
|
||||
|
||||
|
||||
@event_class('Accessibility.nodesUpdated')
@dataclass
class NodesUpdated:
    '''
    **EXPERIMENTAL**

    The nodesUpdated event is sent every time a previously requested node has changed in the tree.
    '''
    #: Updated node data.
    nodes: typing.List[AXNode]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> NodesUpdated:
        '''Deserialize the event payload from a CDP JSON dict.'''
        return cls(
            nodes=[AXNode.from_json(i) for i in json['nodes']]
        )
|
@ -0,0 +1,491 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Animation (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
class Animation:
    '''
    Animation instance.
    '''
    #: ``Animation``'s id.
    id_: str

    #: ``Animation``'s name.
    name: str

    #: ``Animation``'s internal paused state.
    paused_state: bool

    #: ``Animation``'s play state.
    play_state: str

    #: ``Animation``'s playback rate.
    playback_rate: float

    #: ``Animation``'s start time.
    #: Milliseconds for time based animations and
    #: percentage [0 - 100] for scroll driven animations
    #: (i.e. when viewOrScrollTimeline exists).
    start_time: float

    #: ``Animation``'s current time.
    current_time: float

    #: Animation type of ``Animation``.
    type_: str

    #: ``Animation``'s source animation node.
    source: typing.Optional[AnimationEffect] = None

    #: A unique ID for ``Animation`` representing the sources that triggered this CSS
    #: animation/transition.
    css_id: typing.Optional[str] = None

    #: View or scroll timeline
    view_or_scroll_timeline: typing.Optional[ViewOrScrollTimeline] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when None.'''
        json = dict()
        json['id'] = self.id_
        json['name'] = self.name
        json['pausedState'] = self.paused_state
        json['playState'] = self.play_state
        json['playbackRate'] = self.playback_rate
        json['startTime'] = self.start_time
        json['currentTime'] = self.current_time
        json['type'] = self.type_
        if self.source is not None:
            json['source'] = self.source.to_json()
        if self.css_id is not None:
            json['cssId'] = self.css_id
        if self.view_or_scroll_timeline is not None:
            json['viewOrScrollTimeline'] = self.view_or_scroll_timeline.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            id_=str(json['id']),
            name=str(json['name']),
            paused_state=bool(json['pausedState']),
            play_state=str(json['playState']),
            playback_rate=float(json['playbackRate']),
            start_time=float(json['startTime']),
            current_time=float(json['currentTime']),
            type_=str(json['type']),
            source=AnimationEffect.from_json(json['source']) if 'source' in json else None,
            css_id=str(json['cssId']) if 'cssId' in json else None,
            view_or_scroll_timeline=ViewOrScrollTimeline.from_json(json['viewOrScrollTimeline']) if 'viewOrScrollTimeline' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class ViewOrScrollTimeline:
    '''
    Timeline instance
    '''
    #: Orientation of the scroll
    axis: dom.ScrollOrientation

    #: Scroll container node
    source_node_id: typing.Optional[dom.BackendNodeId] = None

    #: Represents the starting scroll position of the timeline
    #: as a length offset in pixels from scroll origin.
    start_offset: typing.Optional[float] = None

    #: Represents the ending scroll position of the timeline
    #: as a length offset in pixels from scroll origin.
    end_offset: typing.Optional[float] = None

    #: The element whose principal box's visibility in the
    #: scrollport defined the progress of the timeline.
    #: Does not exist for animations with ScrollTimeline
    subject_node_id: typing.Optional[dom.BackendNodeId] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when None.'''
        json = dict()
        json['axis'] = self.axis.to_json()
        if self.source_node_id is not None:
            json['sourceNodeId'] = self.source_node_id.to_json()
        if self.start_offset is not None:
            json['startOffset'] = self.start_offset
        if self.end_offset is not None:
            json['endOffset'] = self.end_offset
        if self.subject_node_id is not None:
            json['subjectNodeId'] = self.subject_node_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            axis=dom.ScrollOrientation.from_json(json['axis']),
            source_node_id=dom.BackendNodeId.from_json(json['sourceNodeId']) if 'sourceNodeId' in json else None,
            start_offset=float(json['startOffset']) if 'startOffset' in json else None,
            end_offset=float(json['endOffset']) if 'endOffset' in json else None,
            subject_node_id=dom.BackendNodeId.from_json(json['subjectNodeId']) if 'subjectNodeId' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class AnimationEffect:
    '''
    AnimationEffect instance
    '''
    #: ``AnimationEffect``'s delay.
    delay: float

    #: ``AnimationEffect``'s end delay.
    end_delay: float

    #: ``AnimationEffect``'s iteration start.
    iteration_start: float

    #: ``AnimationEffect``'s iterations.
    iterations: float

    #: ``AnimationEffect``'s iteration duration.
    #: Milliseconds for time based animations and
    #: percentage [0 - 100] for scroll driven animations
    #: (i.e. when viewOrScrollTimeline exists).
    duration: float

    #: ``AnimationEffect``'s playback direction.
    direction: str

    #: ``AnimationEffect``'s fill mode.
    fill: str

    #: ``AnimationEffect``'s timing function.
    easing: str

    #: ``AnimationEffect``'s target node.
    backend_node_id: typing.Optional[dom.BackendNodeId] = None

    #: ``AnimationEffect``'s keyframes.
    keyframes_rule: typing.Optional[KeyframesRule] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when None.'''
        json = dict()
        json['delay'] = self.delay
        json['endDelay'] = self.end_delay
        json['iterationStart'] = self.iteration_start
        json['iterations'] = self.iterations
        json['duration'] = self.duration
        json['direction'] = self.direction
        json['fill'] = self.fill
        json['easing'] = self.easing
        if self.backend_node_id is not None:
            json['backendNodeId'] = self.backend_node_id.to_json()
        if self.keyframes_rule is not None:
            json['keyframesRule'] = self.keyframes_rule.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become None.'''
        return cls(
            delay=float(json['delay']),
            end_delay=float(json['endDelay']),
            iteration_start=float(json['iterationStart']),
            iterations=float(json['iterations']),
            duration=float(json['duration']),
            direction=str(json['direction']),
            fill=str(json['fill']),
            easing=str(json['easing']),
            backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
            keyframes_rule=KeyframesRule.from_json(json['keyframesRule']) if 'keyframesRule' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class KeyframesRule:
    '''
    Keyframes Rule.
    '''
    #: List of animation keyframes.
    keyframes: typing.List[KeyframeStyle]

    #: CSS keyframed animation's name.
    name: typing.Optional[str] = None

    def to_json(self):
        json = {'keyframes': [frame.to_json() for frame in self.keyframes]}
        if self.name is not None:
            json['name'] = self.name
        return json

    @classmethod
    def from_json(cls, json):
        return cls(
            keyframes=[KeyframeStyle.from_json(frame) for frame in json['keyframes']],
            name=str(json['name']) if 'name' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class KeyframeStyle:
    '''
    Keyframe Style.
    '''
    #: Keyframe's time offset.
    offset: str

    #: ``AnimationEffect``'s timing function.
    easing: str

    def to_json(self):
        return {'offset': self.offset, 'easing': self.easing}

    @classmethod
    def from_json(cls, json):
        return cls(offset=str(json['offset']), easing=str(json['easing']))
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables animation domain notifications.
    '''
    # No parameters and no result: yield the command and ignore the reply.
    yield {'method': 'Animation.disable'}
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables animation domain notifications.
    '''
    # No parameters and no result: yield the command and ignore the reply.
    yield {'method': 'Animation.enable'}
|
||||
|
||||
|
||||
def get_current_time(
        id_: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
    '''
    Returns the current time of an animation.

    :param id_: Id of animation.
    :returns: Current time of the page.
    '''
    response = yield {
        'method': 'Animation.getCurrentTime',
        'params': {'id': id_},
    }
    return float(response['currentTime'])
|
||||
|
||||
|
||||
def get_playback_rate() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
    '''
    Gets the playback rate of the document timeline.

    :returns: Playback rate for animations on page.
    '''
    response = yield {'method': 'Animation.getPlaybackRate'}
    return float(response['playbackRate'])
|
||||
|
||||
|
||||
def release_animations(
        animations: typing.List[str]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Releases a set of animations to no longer be manipulated.

    :param animations: List of animation ids to seek.
    '''
    # Copy the id list so the caller's list is not aliased in the command.
    params: T_JSON_DICT = {'animations': list(animations)}
    yield {
        'method': 'Animation.releaseAnimations',
        'params': params,
    }
|
||||
|
||||
|
||||
def resolve_animation(
        animation_id: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,runtime.RemoteObject]:
    '''
    Gets the remote object of the Animation.

    :param animation_id: Animation id.
    :returns: Corresponding remote object.
    '''
    response = yield {
        'method': 'Animation.resolveAnimation',
        'params': {'animationId': animation_id},
    }
    return runtime.RemoteObject.from_json(response['remoteObject'])
|
||||
|
||||
|
||||
def seek_animations(
        animations: typing.List[str],
        current_time: float
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Seek a set of animations to a particular time within each animation.

    :param animations: List of animation ids to seek.
    :param current_time: Set the current time of each animation.
    '''
    params: T_JSON_DICT = {
        'animations': list(animations),
        'currentTime': current_time,
    }
    yield {
        'method': 'Animation.seekAnimations',
        'params': params,
    }
|
||||
|
||||
|
||||
def set_paused(
        animations: typing.List[str],
        paused: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sets the paused state of a set of animations.

    :param animations: Animations to set the pause state of.
    :param paused: Paused state to set to.
    '''
    params: T_JSON_DICT = {
        'animations': list(animations),
        'paused': paused,
    }
    yield {
        'method': 'Animation.setPaused',
        'params': params,
    }
|
||||
|
||||
|
||||
def set_playback_rate(
        playback_rate: float
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sets the playback rate of the document timeline.

    :param playback_rate: Playback rate for animations on page
    '''
    yield {
        'method': 'Animation.setPlaybackRate',
        'params': {'playbackRate': playback_rate},
    }
|
||||
|
||||
|
||||
def set_timing(
        animation_id: str,
        duration: float,
        delay: float
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sets the timing of an animation node.

    :param animation_id: Animation id.
    :param duration: Duration of the animation.
    :param delay: Delay of the animation.
    '''
    params: T_JSON_DICT = {
        'animationId': animation_id,
        'duration': duration,
        'delay': delay,
    }
    yield {
        'method': 'Animation.setTiming',
        'params': params,
    }
|
||||
|
||||
|
||||
@event_class('Animation.animationCanceled')
@dataclass
class AnimationCanceled:
    '''
    Event for when an animation has been cancelled.
    '''
    #: Id of the animation that was cancelled.
    id_: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AnimationCanceled:
        # 'id_' carries a trailing underscore to avoid shadowing the builtin ``id``;
        # the wire key is plain 'id'.
        return cls(
            id_=str(json['id'])
        )
|
||||
|
||||
|
||||
@event_class('Animation.animationCreated')
@dataclass
class AnimationCreated:
    '''
    Event for each animation that has been created.
    '''
    #: Id of the animation that was created.
    id_: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AnimationCreated:
        # 'id_' carries a trailing underscore to avoid shadowing the builtin ``id``;
        # the wire key is plain 'id'.
        return cls(
            id_=str(json['id'])
        )
|
||||
|
||||
|
||||
@event_class('Animation.animationStarted')
@dataclass
class AnimationStarted:
    '''
    Event for animation that has been started.
    '''
    #: Animation that was started.
    animation: Animation

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AnimationStarted:
        return cls(
            animation=Animation.from_json(json['animation'])
        )
|
||||
|
||||
|
||||
@event_class('Animation.animationUpdated')
@dataclass
class AnimationUpdated:
    '''
    Event for animation that has been updated.
    '''
    #: Animation that was updated.
    animation: Animation

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AnimationUpdated:
        return cls(
            animation=Animation.from_json(json['animation'])
        )
|
1533
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/audits.py
Executable file
1533
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/audits.py
Executable file
File diff suppressed because it is too large
Load Diff
283
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/autofill.py
Executable file
283
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/autofill.py
Executable file
@ -0,0 +1,283 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Autofill (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
class CreditCard:
    '''
    Credit card data used by the Autofill domain to fill out forms.
    '''
    #: 16-digit credit card number.
    number: str

    #: Name of the credit card owner.
    name: str

    #: 2-digit expiry month.
    expiry_month: str

    #: 4-digit expiry year.
    expiry_year: str

    #: 3-digit card verification code.
    cvc: str

    def to_json(self):
        return {
            'number': self.number,
            'name': self.name,
            'expiryMonth': self.expiry_month,
            'expiryYear': self.expiry_year,
            'cvc': self.cvc,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            number=str(json['number']),
            name=str(json['name']),
            expiry_month=str(json['expiryMonth']),
            expiry_year=str(json['expiryYear']),
            cvc=str(json['cvc']),
        )
|
||||
|
||||
|
||||
@dataclass
class AddressField:
    '''
    A single name/value pair of an autofill address profile.
    '''
    #: address field name, for example GIVEN_NAME.
    name: str

    #: address field value, for example Jon Doe.
    value: str

    def to_json(self):
        return {'name': self.name, 'value': self.value}

    @classmethod
    def from_json(cls, json):
        return cls(name=str(json['name']), value=str(json['value']))
|
||||
|
||||
|
||||
@dataclass
class AddressFields:
    '''
    A list of address fields.
    '''
    #: The fields making up one address line.
    fields: typing.List[AddressField]

    def to_json(self):
        return {'fields': [f.to_json() for f in self.fields]}

    @classmethod
    def from_json(cls, json):
        return cls(fields=[AddressField.from_json(f) for f in json['fields']])
|
||||
|
||||
|
||||
@dataclass
class Address:
    '''
    An autofill address profile.
    '''
    #: fields and values defining an address.
    fields: typing.List[AddressField]

    def to_json(self):
        return {'fields': [f.to_json() for f in self.fields]}

    @classmethod
    def from_json(cls, json):
        return cls(fields=[AddressField.from_json(f) for f in json['fields']])
|
||||
|
||||
|
||||
@dataclass
class AddressUI:
    '''
    Defines how an address can be displayed like in chrome://settings/addresses.
    Address UI is a two dimensional array, each inner array is an "address information line", and when rendered in a UI surface should be displayed as such.
    The following address UI for instance:
    [[{name: "GIVE_NAME", value: "Jon"}, {name: "FAMILY_NAME", value: "Doe"}], [{name: "CITY", value: "Munich"}, {name: "ZIP", value: "81456"}]]
    should allow the receiver to render:
    Jon Doe
    Munich 81456
    '''
    #: A two dimension array containing the representation of values from an address profile.
    address_fields: typing.List[AddressFields]

    def to_json(self):
        return {'addressFields': [line.to_json() for line in self.address_fields]}

    @classmethod
    def from_json(cls, json):
        return cls(
            address_fields=[AddressFields.from_json(line) for line in json['addressFields']],
        )
|
||||
|
||||
|
||||
class FillingStrategy(enum.Enum):
    '''
    Specified whether a filled field was done so by using the html autocomplete attribute or autofill heuristics.
    '''
    AUTOCOMPLETE_ATTRIBUTE = "autocompleteAttribute"
    AUTOFILL_INFERRED = "autofillInferred"

    def to_json(self):
        # Serialize as the raw CDP string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        # Look up the member whose value equals the CDP string.
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class FilledField:
    '''
    Describes one form field that was filled by autofill.
    '''
    #: The type of the field, e.g text, password etc.
    html_type: str

    #: the html id
    id_: str

    #: the html name
    name: str

    #: the field value
    value: str

    #: The actual field type, e.g FAMILY_NAME
    autofill_type: str

    #: The filling strategy
    filling_strategy: FillingStrategy

    #: The frame the field belongs to
    frame_id: page.FrameId

    #: The form field's DOM node
    field_id: dom.BackendNodeId

    def to_json(self):
        return {
            'htmlType': self.html_type,
            'id': self.id_,
            'name': self.name,
            'value': self.value,
            'autofillType': self.autofill_type,
            'fillingStrategy': self.filling_strategy.to_json(),
            'frameId': self.frame_id.to_json(),
            'fieldId': self.field_id.to_json(),
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            html_type=str(json['htmlType']),
            id_=str(json['id']),
            name=str(json['name']),
            value=str(json['value']),
            autofill_type=str(json['autofillType']),
            filling_strategy=FillingStrategy.from_json(json['fillingStrategy']),
            frame_id=page.FrameId.from_json(json['frameId']),
            field_id=dom.BackendNodeId.from_json(json['fieldId']),
        )
|
||||
|
||||
|
||||
def trigger(
        field_id: dom.BackendNodeId,
        frame_id: typing.Optional[page.FrameId] = None,
        card: typing.Optional[CreditCard] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Trigger autofill on a form identified by the fieldId.
    If the field and related form cannot be autofilled, returns an error.

    :param field_id: Identifies a field that serves as an anchor for autofill.
    :param frame_id: *(Optional)* Identifies the frame that field belongs to.
    :param card: *(Optional)* Credit card information to fill out the form. Credit card data is not saved.
    '''
    params: T_JSON_DICT = dict()
    params['fieldId'] = field_id.to_json()
    if frame_id is not None:
        params['frameId'] = frame_id.to_json()
    # Fix: 'card' defaults to None but was previously serialized
    # unconditionally, raising AttributeError whenever no card was supplied.
    # Only include it in the payload when it was actually provided.
    if card is not None:
        params['card'] = card.to_json()
    cmd_dict: T_JSON_DICT = {
        'method': 'Autofill.trigger',
        'params': params,
    }
    json = yield cmd_dict
|
||||
|
||||
|
||||
def set_addresses(
        addresses: typing.List[Address]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Set addresses so that developers can verify their forms implementation.

    :param addresses:
    '''
    params: T_JSON_DICT = {
        'addresses': [addr.to_json() for addr in addresses],
    }
    yield {
        'method': 'Autofill.setAddresses',
        'params': params,
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables autofill domain notifications.
    '''
    # No parameters and no result: yield the command and ignore the reply.
    yield {'method': 'Autofill.disable'}
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables autofill domain notifications.
    '''
    # No parameters and no result: yield the command and ignore the reply.
    yield {'method': 'Autofill.enable'}
|
||||
|
||||
|
||||
@event_class('Autofill.addressFormFilled')
@dataclass
class AddressFormFilled:
    '''
    Emitted when an address form is filled.
    '''
    #: Information about the fields that were filled
    filled_fields: typing.List[FilledField]
    #: An UI representation of the address used to fill the form.
    #: Consists of a 2D array where each child represents an address/profile line.
    address_ui: AddressUI

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AddressFormFilled:
        # NOTE: the wire key is 'addressUi' (lowercase 'i'), not 'addressUI'.
        return cls(
            filled_fields=[FilledField.from_json(i) for i in json['filledFields']],
            address_ui=AddressUI.from_json(json['addressUi'])
        )
|
@ -0,0 +1,213 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: BackgroundService (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
from . import service_worker
|
||||
|
||||
|
||||
class ServiceName(enum.Enum):
    '''
    The Background Service that will be associated with the commands/events.
    Every Background Service operates independently, but they share the same
    API.
    '''
    BACKGROUND_FETCH = "backgroundFetch"
    BACKGROUND_SYNC = "backgroundSync"
    PUSH_MESSAGING = "pushMessaging"
    NOTIFICATIONS = "notifications"
    PAYMENT_HANDLER = "paymentHandler"
    PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"

    def to_json(self):
        # Serialize as the raw CDP string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        # Look up the member whose value equals the CDP string.
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class EventMetadata:
    '''
    A key-value pair for additional event information to pass along.
    '''
    #: Metadata key.
    key: str

    #: Metadata value.
    value: str

    def to_json(self):
        return {'key': self.key, 'value': self.value}

    @classmethod
    def from_json(cls, json):
        return cls(key=str(json['key']), value=str(json['value']))
|
||||
|
||||
|
||||
@dataclass
class BackgroundServiceEvent:
    '''
    A single event recorded by a Background Service.
    '''
    #: Timestamp of the event (in seconds).
    timestamp: network.TimeSinceEpoch

    #: The origin this event belongs to.
    origin: str

    #: The Service Worker ID that initiated the event.
    service_worker_registration_id: service_worker.RegistrationID

    #: The Background Service this event belongs to.
    service: ServiceName

    #: A description of the event.
    event_name: str

    #: An identifier that groups related events together.
    instance_id: str

    #: A list of event-specific information.
    event_metadata: typing.List[EventMetadata]

    #: Storage key this event belongs to.
    storage_key: str

    def to_json(self):
        return {
            'timestamp': self.timestamp.to_json(),
            'origin': self.origin,
            'serviceWorkerRegistrationId': self.service_worker_registration_id.to_json(),
            'service': self.service.to_json(),
            'eventName': self.event_name,
            'instanceId': self.instance_id,
            'eventMetadata': [m.to_json() for m in self.event_metadata],
            'storageKey': self.storage_key,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            timestamp=network.TimeSinceEpoch.from_json(json['timestamp']),
            origin=str(json['origin']),
            service_worker_registration_id=service_worker.RegistrationID.from_json(json['serviceWorkerRegistrationId']),
            service=ServiceName.from_json(json['service']),
            event_name=str(json['eventName']),
            instance_id=str(json['instanceId']),
            event_metadata=[EventMetadata.from_json(m) for m in json['eventMetadata']],
            storage_key=str(json['storageKey']),
        )
|
||||
|
||||
|
||||
def start_observing(
        service: ServiceName
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables event updates for the service.

    :param service:
    '''
    yield {
        'method': 'BackgroundService.startObserving',
        'params': {'service': service.to_json()},
    }
|
||||
|
||||
|
||||
def stop_observing(
        service: ServiceName
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables event updates for the service.

    :param service:
    '''
    yield {
        'method': 'BackgroundService.stopObserving',
        'params': {'service': service.to_json()},
    }
|
||||
|
||||
|
||||
def set_recording(
        should_record: bool,
        service: ServiceName
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Set the recording state for the service.

    :param should_record:
    :param service:
    '''
    params: T_JSON_DICT = {
        'shouldRecord': should_record,
        'service': service.to_json(),
    }
    yield {
        'method': 'BackgroundService.setRecording',
        'params': params,
    }
|
||||
|
||||
|
||||
def clear_events(
        service: ServiceName
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Clears all stored data for the service.

    :param service:
    '''
    yield {
        'method': 'BackgroundService.clearEvents',
        'params': {'service': service.to_json()},
    }
|
||||
|
||||
|
||||
@event_class('BackgroundService.recordingStateChanged')
@dataclass
class RecordingStateChanged:
    '''
    Called when the recording state for the service has been updated.
    '''
    #: Whether the service is now recording events.
    is_recording: bool
    #: The service whose recording state changed.
    service: ServiceName

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> RecordingStateChanged:
        return cls(
            is_recording=bool(json['isRecording']),
            service=ServiceName.from_json(json['service'])
        )
|
||||
|
||||
|
||||
@event_class('BackgroundService.backgroundServiceEventReceived')
@dataclass
class BackgroundServiceEventReceived:
    '''
    Called with all existing backgroundServiceEvents when enabled, and all new
    events afterwards if enabled and recording.
    '''
    #: The event that was recorded (or replayed on enable).
    background_service_event: BackgroundServiceEvent

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEventReceived:
        return cls(
            background_service_event=BackgroundServiceEvent.from_json(json['backgroundServiceEvent'])
        )
|
@ -0,0 +1,196 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: BluetoothEmulation (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class CentralState(enum.Enum):
    '''
    Indicates the various states of Central.
    '''
    ABSENT = "absent"
    POWERED_OFF = "powered-off"
    POWERED_ON = "powered-on"

    def to_json(self):
        # Serialize as the raw CDP string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        # Look up the member whose value equals the CDP string.
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class ManufacturerData:
    '''
    Stores the manufacturer data.
    '''
    #: Company identifier
    #: https://bitbucket.org/bluetooth-SIG/public/src/main/assigned_numbers/company_identifiers/company_identifiers.yaml
    #: https://usb.org/developers
    key: int

    #: Manufacturer-specific data
    data: str

    def to_json(self):
        return {'key': self.key, 'data': self.data}

    @classmethod
    def from_json(cls, json):
        return cls(key=int(json['key']), data=str(json['data']))
|
||||
|
||||
|
||||
@dataclass
class ScanRecord:
    '''
    Stores the byte data of the advertisement packet sent by a Bluetooth device.
    '''
    #: Advertised device name.
    name: typing.Optional[str] = None

    #: Advertised service UUIDs.
    uuids: typing.Optional[typing.List[str]] = None

    #: Stores the external appearance description of the device.
    appearance: typing.Optional[int] = None

    #: Stores the transmission power of a broadcasting device.
    tx_power: typing.Optional[int] = None

    #: Key is the company identifier and the value is an array of bytes of
    #: manufacturer specific data.
    manufacturer_data: typing.Optional[typing.List[ManufacturerData]] = None

    def to_json(self):
        # Every field is optional: emit only the ones that are set.
        json = dict()
        if self.name is not None:
            json['name'] = self.name
        if self.uuids is not None:
            json['uuids'] = list(self.uuids)
        if self.appearance is not None:
            json['appearance'] = self.appearance
        if self.tx_power is not None:
            json['txPower'] = self.tx_power
        if self.manufacturer_data is not None:
            json['manufacturerData'] = [m.to_json() for m in self.manufacturer_data]
        return json

    @classmethod
    def from_json(cls, json):
        mfg = [ManufacturerData.from_json(m) for m in json['manufacturerData']] if 'manufacturerData' in json else None
        return cls(
            name=str(json['name']) if 'name' in json else None,
            uuids=[str(u) for u in json['uuids']] if 'uuids' in json else None,
            appearance=int(json['appearance']) if 'appearance' in json else None,
            tx_power=int(json['txPower']) if 'txPower' in json else None,
            manufacturer_data=mfg,
        )
|
||||
|
||||
|
||||
@dataclass
class ScanEntry:
    '''
    Stores the advertisement packet information that is sent by a Bluetooth device.
    '''
    #: Address of the advertising device.
    device_address: str

    #: Received signal strength indicator.
    rssi: int

    #: Payload of the advertisement packet.
    scan_record: ScanRecord

    def to_json(self):
        return {
            'deviceAddress': self.device_address,
            'rssi': self.rssi,
            'scanRecord': self.scan_record.to_json(),
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            device_address=str(json['deviceAddress']),
            rssi=int(json['rssi']),
            scan_record=ScanRecord.from_json(json['scanRecord']),
        )
|
||||
|
||||
|
||||
def enable(
        state: CentralState
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enable the BluetoothEmulation domain.

    :param state: State of the simulated central.
    '''
    yield {
        'method': 'BluetoothEmulation.enable',
        'params': {'state': state.to_json()},
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disable the BluetoothEmulation domain.
    '''
    # No parameters and no result: yield the command and ignore the reply.
    yield {'method': 'BluetoothEmulation.disable'}
|
||||
|
||||
|
||||
def simulate_preconnected_peripheral(
        address: str,
        name: str,
        manufacturer_data: typing.List[ManufacturerData],
        known_service_uuids: typing.List[str]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Simulates a peripheral with ``address``, ``name`` and ``knownServiceUuids``
    that has already been connected to the system.

    :param address:
    :param name:
    :param manufacturer_data:
    :param known_service_uuids:
    '''
    params: T_JSON_DICT = {
        'address': address,
        'name': name,
        'manufacturerData': [m.to_json() for m in manufacturer_data],
        'knownServiceUuids': list(known_service_uuids),
    }
    yield {
        'method': 'BluetoothEmulation.simulatePreconnectedPeripheral',
        'params': params,
    }
|
||||
|
||||
|
||||
def simulate_advertisement(
        entry: ScanEntry
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Simulates an advertisement packet described in ``entry`` being received by
    the central.

    :param entry:
    '''
    yield {
        'method': 'BluetoothEmulation.simulateAdvertisement',
        'params': {'entry': entry.to_json()},
    }
|
726
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/browser.py
Executable file
726
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/browser.py
Executable file
@ -0,0 +1,726 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Browser
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import page
|
||||
from . import target
|
||||
|
||||
|
||||
class BrowserContextID(str):
    '''
    Opaque identifier of a browser context; serialized as a bare string.
    '''
    def to_json(self) -> str:
        return self

    @classmethod
    def from_json(cls, json: str) -> BrowserContextID:
        return cls(json)

    def __repr__(self):
        return f'BrowserContextID({super().__repr__()})'
|
||||
|
||||
|
||||
class WindowID(int):
    '''
    Numeric identifier of a browser window; serialized as a bare int.
    '''
    def to_json(self) -> int:
        return self

    @classmethod
    def from_json(cls, json: int) -> WindowID:
        return cls(json)

    def __repr__(self):
        return f'WindowID({super().__repr__()})'
|
||||
|
||||
|
||||
class WindowState(enum.Enum):
|
||||
'''
|
||||
The state of the browser window.
|
||||
'''
|
||||
NORMAL = "normal"
|
||||
MINIMIZED = "minimized"
|
||||
MAXIMIZED = "maximized"
|
||||
FULLSCREEN = "fullscreen"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Bounds:
|
||||
'''
|
||||
Browser window bounds information
|
||||
'''
|
||||
#: The offset from the left edge of the screen to the window in pixels.
|
||||
left: typing.Optional[int] = None
|
||||
|
||||
#: The offset from the top edge of the screen to the window in pixels.
|
||||
top: typing.Optional[int] = None
|
||||
|
||||
#: The window width in pixels.
|
||||
width: typing.Optional[int] = None
|
||||
|
||||
#: The window height in pixels.
|
||||
height: typing.Optional[int] = None
|
||||
|
||||
#: The window state. Default to normal.
|
||||
window_state: typing.Optional[WindowState] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
if self.left is not None:
|
||||
json['left'] = self.left
|
||||
if self.top is not None:
|
||||
json['top'] = self.top
|
||||
if self.width is not None:
|
||||
json['width'] = self.width
|
||||
if self.height is not None:
|
||||
json['height'] = self.height
|
||||
if self.window_state is not None:
|
||||
json['windowState'] = self.window_state.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
left=int(json['left']) if 'left' in json else None,
|
||||
top=int(json['top']) if 'top' in json else None,
|
||||
width=int(json['width']) if 'width' in json else None,
|
||||
height=int(json['height']) if 'height' in json else None,
|
||||
window_state=WindowState.from_json(json['windowState']) if 'windowState' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class PermissionType(enum.Enum):
|
||||
ACCESSIBILITY_EVENTS = "accessibilityEvents"
|
||||
AUDIO_CAPTURE = "audioCapture"
|
||||
BACKGROUND_SYNC = "backgroundSync"
|
||||
BACKGROUND_FETCH = "backgroundFetch"
|
||||
CAPTURED_SURFACE_CONTROL = "capturedSurfaceControl"
|
||||
CLIPBOARD_READ_WRITE = "clipboardReadWrite"
|
||||
CLIPBOARD_SANITIZED_WRITE = "clipboardSanitizedWrite"
|
||||
DISPLAY_CAPTURE = "displayCapture"
|
||||
DURABLE_STORAGE = "durableStorage"
|
||||
FLASH = "flash"
|
||||
GEOLOCATION = "geolocation"
|
||||
IDLE_DETECTION = "idleDetection"
|
||||
LOCAL_FONTS = "localFonts"
|
||||
MIDI = "midi"
|
||||
MIDI_SYSEX = "midiSysex"
|
||||
NFC = "nfc"
|
||||
NOTIFICATIONS = "notifications"
|
||||
PAYMENT_HANDLER = "paymentHandler"
|
||||
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
|
||||
PROTECTED_MEDIA_IDENTIFIER = "protectedMediaIdentifier"
|
||||
SENSORS = "sensors"
|
||||
STORAGE_ACCESS = "storageAccess"
|
||||
SPEAKER_SELECTION = "speakerSelection"
|
||||
TOP_LEVEL_STORAGE_ACCESS = "topLevelStorageAccess"
|
||||
VIDEO_CAPTURE = "videoCapture"
|
||||
VIDEO_CAPTURE_PAN_TILT_ZOOM = "videoCapturePanTiltZoom"
|
||||
WAKE_LOCK_SCREEN = "wakeLockScreen"
|
||||
WAKE_LOCK_SYSTEM = "wakeLockSystem"
|
||||
WEB_APP_INSTALLATION = "webAppInstallation"
|
||||
WINDOW_MANAGEMENT = "windowManagement"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class PermissionSetting(enum.Enum):
|
||||
GRANTED = "granted"
|
||||
DENIED = "denied"
|
||||
PROMPT = "prompt"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PermissionDescriptor:
|
||||
'''
|
||||
Definition of PermissionDescriptor defined in the Permissions API:
|
||||
https://w3c.github.io/permissions/#dom-permissiondescriptor.
|
||||
'''
|
||||
#: Name of permission.
|
||||
#: See https://cs.chromium.org/chromium/src/third_party/blink/renderer/modules/permissions/permission_descriptor.idl for valid permission names.
|
||||
name: str
|
||||
|
||||
#: For "midi" permission, may also specify sysex control.
|
||||
sysex: typing.Optional[bool] = None
|
||||
|
||||
#: For "push" permission, may specify userVisibleOnly.
|
||||
#: Note that userVisibleOnly = true is the only currently supported type.
|
||||
user_visible_only: typing.Optional[bool] = None
|
||||
|
||||
#: For "clipboard" permission, may specify allowWithoutSanitization.
|
||||
allow_without_sanitization: typing.Optional[bool] = None
|
||||
|
||||
#: For "fullscreen" permission, must specify allowWithoutGesture:true.
|
||||
allow_without_gesture: typing.Optional[bool] = None
|
||||
|
||||
#: For "camera" permission, may specify panTiltZoom.
|
||||
pan_tilt_zoom: typing.Optional[bool] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
if self.sysex is not None:
|
||||
json['sysex'] = self.sysex
|
||||
if self.user_visible_only is not None:
|
||||
json['userVisibleOnly'] = self.user_visible_only
|
||||
if self.allow_without_sanitization is not None:
|
||||
json['allowWithoutSanitization'] = self.allow_without_sanitization
|
||||
if self.allow_without_gesture is not None:
|
||||
json['allowWithoutGesture'] = self.allow_without_gesture
|
||||
if self.pan_tilt_zoom is not None:
|
||||
json['panTiltZoom'] = self.pan_tilt_zoom
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
sysex=bool(json['sysex']) if 'sysex' in json else None,
|
||||
user_visible_only=bool(json['userVisibleOnly']) if 'userVisibleOnly' in json else None,
|
||||
allow_without_sanitization=bool(json['allowWithoutSanitization']) if 'allowWithoutSanitization' in json else None,
|
||||
allow_without_gesture=bool(json['allowWithoutGesture']) if 'allowWithoutGesture' in json else None,
|
||||
pan_tilt_zoom=bool(json['panTiltZoom']) if 'panTiltZoom' in json else None,
|
||||
)
|
||||
|
||||
|
||||
class BrowserCommandId(enum.Enum):
|
||||
'''
|
||||
Browser command ids used by executeBrowserCommand.
|
||||
'''
|
||||
OPEN_TAB_SEARCH = "openTabSearch"
|
||||
CLOSE_TAB_SEARCH = "closeTabSearch"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Bucket:
|
||||
'''
|
||||
Chrome histogram bucket.
|
||||
'''
|
||||
#: Minimum value (inclusive).
|
||||
low: int
|
||||
|
||||
#: Maximum value (exclusive).
|
||||
high: int
|
||||
|
||||
#: Number of samples.
|
||||
count: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['low'] = self.low
|
||||
json['high'] = self.high
|
||||
json['count'] = self.count
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
low=int(json['low']),
|
||||
high=int(json['high']),
|
||||
count=int(json['count']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Histogram:
|
||||
'''
|
||||
Chrome histogram.
|
||||
'''
|
||||
#: Name.
|
||||
name: str
|
||||
|
||||
#: Sum of sample values.
|
||||
sum_: int
|
||||
|
||||
#: Total number of samples.
|
||||
count: int
|
||||
|
||||
#: Buckets.
|
||||
buckets: typing.List[Bucket]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['sum'] = self.sum_
|
||||
json['count'] = self.count
|
||||
json['buckets'] = [i.to_json() for i in self.buckets]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
sum_=int(json['sum']),
|
||||
count=int(json['count']),
|
||||
buckets=[Bucket.from_json(i) for i in json['buckets']],
|
||||
)
|
||||
|
||||
|
||||
def set_permission(
|
||||
permission: PermissionDescriptor,
|
||||
setting: PermissionSetting,
|
||||
origin: typing.Optional[str] = None,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set permission settings for given origin.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param permission: Descriptor of permission to override.
|
||||
:param setting: Setting of the permission.
|
||||
:param origin: *(Optional)* Origin the permission applies to, all origins if not specified.
|
||||
:param browser_context_id: *(Optional)* Context to override. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['permission'] = permission.to_json()
|
||||
params['setting'] = setting.to_json()
|
||||
if origin is not None:
|
||||
params['origin'] = origin
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setPermission',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def grant_permissions(
|
||||
permissions: typing.List[PermissionType],
|
||||
origin: typing.Optional[str] = None,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Grant specific permissions to the given origin and reject all others.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param permissions:
|
||||
:param origin: *(Optional)* Origin the permission applies to, all origins if not specified.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to override permissions. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['permissions'] = [i.to_json() for i in permissions]
|
||||
if origin is not None:
|
||||
params['origin'] = origin
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.grantPermissions',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def reset_permissions(
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Reset all permission management for all origins.
|
||||
|
||||
:param browser_context_id: *(Optional)* BrowserContext to reset permissions. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.resetPermissions',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_download_behavior(
|
||||
behavior: str,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None,
|
||||
download_path: typing.Optional[str] = None,
|
||||
events_enabled: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set the behavior when downloading a file.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param behavior: Whether to allow all or deny all download requests, or use default Chrome behavior if available (otherwise deny). ``allowAndName`` allows download and names files according to their download guids.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to set download behavior. When omitted, default browser context is used.
|
||||
:param download_path: *(Optional)* The default path to save downloaded files to. This is required if behavior is set to 'allow' or 'allowAndName'.
|
||||
:param events_enabled: *(Optional)* Whether to emit download events (defaults to false).
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['behavior'] = behavior
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
if download_path is not None:
|
||||
params['downloadPath'] = download_path
|
||||
if events_enabled is not None:
|
||||
params['eventsEnabled'] = events_enabled
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setDownloadBehavior',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def cancel_download(
|
||||
guid: str,
|
||||
browser_context_id: typing.Optional[BrowserContextID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Cancel a download if in progress
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param guid: Global unique identifier of the download.
|
||||
:param browser_context_id: *(Optional)* BrowserContext to perform the action in. When omitted, default browser context is used.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['guid'] = guid
|
||||
if browser_context_id is not None:
|
||||
params['browserContextId'] = browser_context_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.cancelDownload',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def close() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Close browser gracefully.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.close',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def crash() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Crashes browser on the main thread.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.crash',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def crash_gpu_process() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Crashes GPU process.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.crashGpuProcess',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_version() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, str, str, str, str]]:
|
||||
'''
|
||||
Returns version information.
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **protocolVersion** - Protocol version.
|
||||
1. **product** - Product name.
|
||||
2. **revision** - Product revision.
|
||||
3. **userAgent** - User-Agent.
|
||||
4. **jsVersion** - V8 version.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getVersion',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
str(json['protocolVersion']),
|
||||
str(json['product']),
|
||||
str(json['revision']),
|
||||
str(json['userAgent']),
|
||||
str(json['jsVersion'])
|
||||
)
|
||||
|
||||
|
||||
def get_browser_command_line() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
Returns the command line switches for the browser process if, and only if
|
||||
--enable-automation is on the commandline.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:returns: Commandline parameters
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getBrowserCommandLine',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['arguments']]
|
||||
|
||||
|
||||
def get_histograms(
|
||||
query: typing.Optional[str] = None,
|
||||
delta: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Histogram]]:
|
||||
'''
|
||||
Get Chrome histograms.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param query: *(Optional)* Requested substring in name. Only histograms which have query as a substring in their name are extracted. An empty or absent query returns all histograms.
|
||||
:param delta: *(Optional)* If true, retrieve delta since last delta call.
|
||||
:returns: Histograms.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if query is not None:
|
||||
params['query'] = query
|
||||
if delta is not None:
|
||||
params['delta'] = delta
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getHistograms',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Histogram.from_json(i) for i in json['histograms']]
|
||||
|
||||
|
||||
def get_histogram(
|
||||
name: str,
|
||||
delta: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Histogram]:
|
||||
'''
|
||||
Get a Chrome histogram by name.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param name: Requested histogram name.
|
||||
:param delta: *(Optional)* If true, retrieve delta since last delta call.
|
||||
:returns: Histogram.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['name'] = name
|
||||
if delta is not None:
|
||||
params['delta'] = delta
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getHistogram',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Histogram.from_json(json['histogram'])
|
||||
|
||||
|
||||
def get_window_bounds(
|
||||
window_id: WindowID
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Bounds]:
|
||||
'''
|
||||
Get position and size of the browser window.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param window_id: Browser window id.
|
||||
:returns: Bounds information of the window. When window state is 'minimized', the restored window position and size are returned.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['windowId'] = window_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getWindowBounds',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Bounds.from_json(json['bounds'])
|
||||
|
||||
|
||||
def get_window_for_target(
|
||||
target_id: typing.Optional[target.TargetID] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[WindowID, Bounds]]:
|
||||
'''
|
||||
Get the browser window that contains the devtools target.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param target_id: *(Optional)* Devtools agent host id. If called as a part of the session, associated targetId is used.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **windowId** - Browser window id.
|
||||
1. **bounds** - Bounds information of the window. When window state is 'minimized', the restored window position and size are returned.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if target_id is not None:
|
||||
params['targetId'] = target_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.getWindowForTarget',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
WindowID.from_json(json['windowId']),
|
||||
Bounds.from_json(json['bounds'])
|
||||
)
|
||||
|
||||
|
||||
def set_window_bounds(
|
||||
window_id: WindowID,
|
||||
bounds: Bounds
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set position and/or size of the browser window.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param window_id: Browser window id.
|
||||
:param bounds: New window bounds. The 'minimized', 'maximized' and 'fullscreen' states cannot be combined with 'left', 'top', 'width' or 'height'. Leaves unspecified fields unchanged.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['windowId'] = window_id.to_json()
|
||||
params['bounds'] = bounds.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setWindowBounds',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dock_tile(
|
||||
badge_label: typing.Optional[str] = None,
|
||||
image: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Set dock tile details, platform-specific.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param badge_label: *(Optional)*
|
||||
:param image: *(Optional)* Png encoded image.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if badge_label is not None:
|
||||
params['badgeLabel'] = badge_label
|
||||
if image is not None:
|
||||
params['image'] = image
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.setDockTile',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def execute_browser_command(
|
||||
command_id: BrowserCommandId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Invoke custom browser commands used by telemetry.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param command_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['commandId'] = command_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.executeBrowserCommand',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def add_privacy_sandbox_enrollment_override(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Allows a site to use privacy sandbox features that require enrollment
|
||||
without the site actually being enrolled. Only supported on page targets.
|
||||
|
||||
:param url:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Browser.addPrivacySandboxEnrollmentOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Browser.downloadWillBegin')
|
||||
@dataclass
|
||||
class DownloadWillBegin:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Fired when page is about to start a download.
|
||||
'''
|
||||
#: Id of the frame that caused the download to begin.
|
||||
frame_id: page.FrameId
|
||||
#: Global unique identifier of the download.
|
||||
guid: str
|
||||
#: URL of the resource being downloaded.
|
||||
url: str
|
||||
#: Suggested file name of the resource (the actual name of the file saved on disk may differ).
|
||||
suggested_filename: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DownloadWillBegin:
|
||||
return cls(
|
||||
frame_id=page.FrameId.from_json(json['frameId']),
|
||||
guid=str(json['guid']),
|
||||
url=str(json['url']),
|
||||
suggested_filename=str(json['suggestedFilename'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Browser.downloadProgress')
|
||||
@dataclass
|
||||
class DownloadProgress:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Fired when download makes progress. Last call has ``done`` == true.
|
||||
'''
|
||||
#: Global unique identifier of the download.
|
||||
guid: str
|
||||
#: Total expected bytes to download.
|
||||
total_bytes: float
|
||||
#: Total bytes received.
|
||||
received_bytes: float
|
||||
#: Download status.
|
||||
state: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DownloadProgress:
|
||||
return cls(
|
||||
guid=str(json['guid']),
|
||||
total_bytes=float(json['totalBytes']),
|
||||
received_bytes=float(json['receivedBytes']),
|
||||
state=str(json['state'])
|
||||
)
|
@ -0,0 +1,309 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: CacheStorage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import storage
|
||||
|
||||
|
||||
class CacheId(str):
|
||||
'''
|
||||
Unique identifier of the Cache object.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> CacheId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'CacheId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class CachedResponseType(enum.Enum):
|
||||
'''
|
||||
type of HTTP response cached
|
||||
'''
|
||||
BASIC = "basic"
|
||||
CORS = "cors"
|
||||
DEFAULT = "default"
|
||||
ERROR = "error"
|
||||
OPAQUE_RESPONSE = "opaqueResponse"
|
||||
OPAQUE_REDIRECT = "opaqueRedirect"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DataEntry:
|
||||
'''
|
||||
Data entry.
|
||||
'''
|
||||
#: Request URL.
|
||||
request_url: str
|
||||
|
||||
#: Request method.
|
||||
request_method: str
|
||||
|
||||
#: Request headers
|
||||
request_headers: typing.List[Header]
|
||||
|
||||
#: Number of seconds since epoch.
|
||||
response_time: float
|
||||
|
||||
#: HTTP response status code.
|
||||
response_status: int
|
||||
|
||||
#: HTTP response status text.
|
||||
response_status_text: str
|
||||
|
||||
#: HTTP response type
|
||||
response_type: CachedResponseType
|
||||
|
||||
#: Response headers
|
||||
response_headers: typing.List[Header]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['requestURL'] = self.request_url
|
||||
json['requestMethod'] = self.request_method
|
||||
json['requestHeaders'] = [i.to_json() for i in self.request_headers]
|
||||
json['responseTime'] = self.response_time
|
||||
json['responseStatus'] = self.response_status
|
||||
json['responseStatusText'] = self.response_status_text
|
||||
json['responseType'] = self.response_type.to_json()
|
||||
json['responseHeaders'] = [i.to_json() for i in self.response_headers]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
request_url=str(json['requestURL']),
|
||||
request_method=str(json['requestMethod']),
|
||||
request_headers=[Header.from_json(i) for i in json['requestHeaders']],
|
||||
response_time=float(json['responseTime']),
|
||||
response_status=int(json['responseStatus']),
|
||||
response_status_text=str(json['responseStatusText']),
|
||||
response_type=CachedResponseType.from_json(json['responseType']),
|
||||
response_headers=[Header.from_json(i) for i in json['responseHeaders']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Cache:
|
||||
'''
|
||||
Cache identifier.
|
||||
'''
|
||||
#: An opaque unique id of the cache.
|
||||
cache_id: CacheId
|
||||
|
||||
#: Security origin of the cache.
|
||||
security_origin: str
|
||||
|
||||
#: Storage key of the cache.
|
||||
storage_key: str
|
||||
|
||||
#: The name of the cache.
|
||||
cache_name: str
|
||||
|
||||
#: Storage bucket of the cache.
|
||||
storage_bucket: typing.Optional[storage.StorageBucket] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['cacheId'] = self.cache_id.to_json()
|
||||
json['securityOrigin'] = self.security_origin
|
||||
json['storageKey'] = self.storage_key
|
||||
json['cacheName'] = self.cache_name
|
||||
if self.storage_bucket is not None:
|
||||
json['storageBucket'] = self.storage_bucket.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
cache_id=CacheId.from_json(json['cacheId']),
|
||||
security_origin=str(json['securityOrigin']),
|
||||
storage_key=str(json['storageKey']),
|
||||
cache_name=str(json['cacheName']),
|
||||
storage_bucket=storage.StorageBucket.from_json(json['storageBucket']) if 'storageBucket' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Header:
|
||||
name: str
|
||||
|
||||
value: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['value'] = self.value
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
value=str(json['value']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class CachedResponse:
|
||||
'''
|
||||
Cached response
|
||||
'''
|
||||
#: Entry content, base64-encoded.
|
||||
body: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['body'] = self.body
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
body=str(json['body']),
|
||||
)
|
||||
|
||||
|
||||
def delete_cache(
|
||||
cache_id: CacheId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a cache.
|
||||
|
||||
:param cache_id: Id of cache for deletion.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.deleteCache',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def delete_entry(
|
||||
cache_id: CacheId,
|
||||
request: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Deletes a cache entry.
|
||||
|
||||
:param cache_id: Id of cache where the entry will be deleted.
|
||||
:param request: URL spec of the request.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
params['request'] = request
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.deleteEntry',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def request_cache_names(
|
||||
security_origin: typing.Optional[str] = None,
|
||||
storage_key: typing.Optional[str] = None,
|
||||
storage_bucket: typing.Optional[storage.StorageBucket] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Cache]]:
|
||||
'''
|
||||
Requests cache names.
|
||||
|
||||
:param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, storageBucket must be specified. Security origin.
|
||||
:param storage_key: *(Optional)* Storage key.
|
||||
:param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
|
||||
:returns: Caches for the security origin.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if security_origin is not None:
|
||||
params['securityOrigin'] = security_origin
|
||||
if storage_key is not None:
|
||||
params['storageKey'] = storage_key
|
||||
if storage_bucket is not None:
|
||||
params['storageBucket'] = storage_bucket.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestCacheNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Cache.from_json(i) for i in json['caches']]
|
||||
|
||||
|
||||
def request_cached_response(
|
||||
cache_id: CacheId,
|
||||
request_url: str,
|
||||
request_headers: typing.List[Header]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,CachedResponse]:
|
||||
'''
|
||||
Fetches cache entry.
|
||||
|
||||
:param cache_id: Id of cache that contains the entry.
|
||||
:param request_url: URL spec of the request.
|
||||
:param request_headers: headers of the request.
|
||||
:returns: Response read from the cache.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
params['requestURL'] = request_url
|
||||
params['requestHeaders'] = [i.to_json() for i in request_headers]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestCachedResponse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return CachedResponse.from_json(json['response'])
|
||||
|
||||
|
||||
def request_entries(
|
||||
cache_id: CacheId,
|
||||
skip_count: typing.Optional[int] = None,
|
||||
page_size: typing.Optional[int] = None,
|
||||
path_filter: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DataEntry], float]]:
|
||||
'''
|
||||
Requests data from cache.
|
||||
|
||||
:param cache_id: ID of cache to get entries from.
|
||||
:param skip_count: *(Optional)* Number of records to skip.
|
||||
:param page_size: *(Optional)* Number of records to fetch.
|
||||
:param path_filter: *(Optional)* If present, only return the entries containing this substring in the path
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **cacheDataEntries** - Array of object store data entries.
|
||||
1. **returnCount** - Count of returned entries from this storage. If pathFilter is empty, it is the count of all entries from this storage.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['cacheId'] = cache_id.to_json()
|
||||
if skip_count is not None:
|
||||
params['skipCount'] = skip_count
|
||||
if page_size is not None:
|
||||
params['pageSize'] = page_size
|
||||
if path_filter is not None:
|
||||
params['pathFilter'] = path_filter
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'CacheStorage.requestEntries',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[DataEntry.from_json(i) for i in json['cacheDataEntries']],
|
||||
float(json['returnCount'])
|
||||
)
|
170
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/cast.py
Executable file
170
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/cast.py
Executable file
@ -0,0 +1,170 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Cast (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class Sink:
|
||||
name: str
|
||||
|
||||
id_: str
|
||||
|
||||
#: Text describing the current session. Present only if there is an active
|
||||
#: session on the sink.
|
||||
session: typing.Optional[str] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['id'] = self.id_
|
||||
if self.session is not None:
|
||||
json['session'] = self.session
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
id_=str(json['id']),
|
||||
session=str(json['session']) if 'session' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def enable(
|
||||
presentation_url: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts observing for sinks that can be used for tab mirroring, and if set,
|
||||
sinks compatible with ``presentationUrl`` as well. When sinks are found, a
|
||||
``sinksUpdated`` event is fired.
|
||||
Also starts observing for issue messages. When an issue is added or removed,
|
||||
an ``issueUpdated`` event is fired.
|
||||
|
||||
:param presentation_url: *(Optional)*
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if presentation_url is not None:
|
||||
params['presentationUrl'] = presentation_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.enable',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stops observing for sinks and issues.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_sink_to_use(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets a sink to be used when the web page requests the browser to choose a
|
||||
sink via Presentation API, Remote Playback API, or Cast SDK.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.setSinkToUse',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_desktop_mirroring(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts mirroring the desktop to the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.startDesktopMirroring',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_tab_mirroring(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Starts mirroring the tab to the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.startTabMirroring',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def stop_casting(
|
||||
sink_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Stops the active Cast session on the sink.
|
||||
|
||||
:param sink_name:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['sinkName'] = sink_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Cast.stopCasting',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Cast.sinksUpdated')
|
||||
@dataclass
|
||||
class SinksUpdated:
|
||||
'''
|
||||
This is fired whenever the list of available sinks changes. A sink is a
|
||||
device or a software surface that you can cast to.
|
||||
'''
|
||||
sinks: typing.List[Sink]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> SinksUpdated:
|
||||
return cls(
|
||||
sinks=[Sink.from_json(i) for i in json['sinks']]
|
||||
)
|
||||
|
||||
|
||||
@event_class('Cast.issueUpdated')
|
||||
@dataclass
|
||||
class IssueUpdated:
|
||||
'''
|
||||
This is fired whenever the outstanding issue/error message changes.
|
||||
``issueMessage`` is empty if there is no issue.
|
||||
'''
|
||||
issue_message: str
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> IssueUpdated:
|
||||
return cls(
|
||||
issue_message=str(json['issueMessage'])
|
||||
)
|
105
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/console.py
Executable file
105
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/console.py
Executable file
@ -0,0 +1,105 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Console
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class ConsoleMessage:
|
||||
'''
|
||||
Console message.
|
||||
'''
|
||||
#: Message source.
|
||||
source: str
|
||||
|
||||
#: Message severity.
|
||||
level: str
|
||||
|
||||
#: Message text.
|
||||
text: str
|
||||
|
||||
#: URL of the message origin.
|
||||
url: typing.Optional[str] = None
|
||||
|
||||
#: Line number in the resource that generated this message (1-based).
|
||||
line: typing.Optional[int] = None
|
||||
|
||||
#: Column number in the resource that generated this message (1-based).
|
||||
column: typing.Optional[int] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['source'] = self.source
|
||||
json['level'] = self.level
|
||||
json['text'] = self.text
|
||||
if self.url is not None:
|
||||
json['url'] = self.url
|
||||
if self.line is not None:
|
||||
json['line'] = self.line
|
||||
if self.column is not None:
|
||||
json['column'] = self.column
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
source=str(json['source']),
|
||||
level=str(json['level']),
|
||||
text=str(json['text']),
|
||||
url=str(json['url']) if 'url' in json else None,
|
||||
line=int(json['line']) if 'line' in json else None,
|
||||
column=int(json['column']) if 'column' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def clear_messages() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Does nothing.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.clearMessages',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables console domain, prevents further console messages from being reported to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables console domain, sends the messages collected so far to the client by means of the
|
||||
``messageAdded`` notification.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Console.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Console.messageAdded')
|
||||
@dataclass
|
||||
class MessageAdded:
|
||||
'''
|
||||
Issued when new console message is added.
|
||||
'''
|
||||
#: Console message that has been added.
|
||||
message: ConsoleMessage
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> MessageAdded:
|
||||
return cls(
|
||||
message=ConsoleMessage.from_json(json['message'])
|
||||
)
|
2168
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/css.py
Executable file
2168
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/css.py
Executable file
File diff suppressed because it is too large
Load Diff
162
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/database.py
Executable file
162
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/database.py
Executable file
@ -0,0 +1,162 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Database (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class DatabaseId(str):
|
||||
'''
|
||||
Unique identifier of Database object.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> DatabaseId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'DatabaseId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class Database:
|
||||
'''
|
||||
Database object.
|
||||
'''
|
||||
#: Database ID.
|
||||
id_: DatabaseId
|
||||
|
||||
#: Database domain.
|
||||
domain: str
|
||||
|
||||
#: Database name.
|
||||
name: str
|
||||
|
||||
#: Database version.
|
||||
version: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_.to_json()
|
||||
json['domain'] = self.domain
|
||||
json['name'] = self.name
|
||||
json['version'] = self.version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=DatabaseId.from_json(json['id']),
|
||||
domain=str(json['domain']),
|
||||
name=str(json['name']),
|
||||
version=str(json['version']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Error:
|
||||
'''
|
||||
Database error.
|
||||
'''
|
||||
#: Error message.
|
||||
message: str
|
||||
|
||||
#: Error code.
|
||||
code: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['message'] = self.message
|
||||
json['code'] = self.code
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
message=str(json['message']),
|
||||
code=int(json['code']),
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disables database tracking, prevents database events from being sent to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enables database tracking, database events will now be delivered to the client.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def execute_sql(
|
||||
database_id: DatabaseId,
|
||||
query: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.Optional[typing.List[str]], typing.Optional[typing.List[typing.Any]], typing.Optional[Error]]]:
|
||||
'''
|
||||
:param database_id:
|
||||
:param query:
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **columnNames** -
|
||||
1. **values** -
|
||||
2. **sqlError** -
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['databaseId'] = database_id.to_json()
|
||||
params['query'] = query
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.executeSQL',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[str(i) for i in json['columnNames']] if 'columnNames' in json else None,
|
||||
[i for i in json['values']] if 'values' in json else None,
|
||||
Error.from_json(json['sqlError']) if 'sqlError' in json else None
|
||||
)
|
||||
|
||||
|
||||
def get_database_table_names(
|
||||
database_id: DatabaseId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
|
||||
'''
|
||||
:param database_id:
|
||||
:returns:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['databaseId'] = database_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Database.getDatabaseTableNames',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [str(i) for i in json['tableNames']]
|
||||
|
||||
|
||||
@event_class('Database.addDatabase')
|
||||
@dataclass
|
||||
class AddDatabase:
|
||||
database: Database
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> AddDatabase:
|
||||
return cls(
|
||||
database=Database.from_json(json['database'])
|
||||
)
|
1336
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/debugger.py
Executable file
1336
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/debugger.py
Executable file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,139 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DeviceAccess (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class RequestId(str):
|
||||
'''
|
||||
Device request id.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> RequestId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'RequestId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class DeviceId(str):
|
||||
'''
|
||||
A device id.
|
||||
'''
|
||||
def to_json(self) -> str:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: str) -> DeviceId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'DeviceId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
|
||||
class PromptDevice:
|
||||
'''
|
||||
Device information displayed in a user prompt to select a device.
|
||||
'''
|
||||
id_: DeviceId
|
||||
|
||||
#: Display name as it appears in a device request user prompt.
|
||||
name: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_.to_json()
|
||||
json['name'] = self.name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=DeviceId.from_json(json['id']),
|
||||
name=str(json['name']),
|
||||
)
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Enable events in this domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceAccess.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disable events in this domain.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceAccess.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def select_prompt(
|
||||
id_: RequestId,
|
||||
device_id: DeviceId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Select a device in response to a DeviceAccess.deviceRequestPrompted event.
|
||||
|
||||
:param id_:
|
||||
:param device_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['id'] = id_.to_json()
|
||||
params['deviceId'] = device_id.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceAccess.selectPrompt',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def cancel_prompt(
|
||||
id_: RequestId
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Cancel a prompt in response to a DeviceAccess.deviceRequestPrompted event.
|
||||
|
||||
:param id_:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['id'] = id_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceAccess.cancelPrompt',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('DeviceAccess.deviceRequestPrompted')
|
||||
@dataclass
|
||||
class DeviceRequestPrompted:
|
||||
'''
|
||||
A device request opened a user prompt to select a device. Respond with the
|
||||
selectPrompt or cancelPrompt command.
|
||||
'''
|
||||
id_: RequestId
|
||||
devices: typing.List[PromptDevice]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> DeviceRequestPrompted:
|
||||
return cls(
|
||||
id_=RequestId.from_json(json['id']),
|
||||
devices=[PromptDevice.from_json(i) for i in json['devices']]
|
||||
)
|
@ -0,0 +1,43 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DeviceOrientation (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def clear_device_orientation_override() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Clears the overridden Device Orientation.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceOrientation.clearDeviceOrientationOverride',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_device_orientation_override(
|
||||
alpha: float,
|
||||
beta: float,
|
||||
gamma: float
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Overrides the Device Orientation.
|
||||
|
||||
:param alpha: Mock alpha
|
||||
:param beta: Mock beta
|
||||
:param gamma: Mock gamma
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['alpha'] = alpha
|
||||
params['beta'] = beta
|
||||
params['gamma'] = gamma
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DeviceOrientation.setDeviceOrientationOverride',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
2115
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/dom.py
Executable file
2115
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/dom.py
Executable file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,312 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMDebugger
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import runtime
|
||||
|
||||
|
||||
class DOMBreakpointType(enum.Enum):
|
||||
'''
|
||||
DOM breakpoint type.
|
||||
'''
|
||||
SUBTREE_MODIFIED = "subtree-modified"
|
||||
ATTRIBUTE_MODIFIED = "attribute-modified"
|
||||
NODE_REMOVED = "node-removed"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
class CSPViolationType(enum.Enum):
|
||||
'''
|
||||
CSP Violation type.
|
||||
'''
|
||||
TRUSTEDTYPE_SINK_VIOLATION = "trustedtype-sink-violation"
|
||||
TRUSTEDTYPE_POLICY_VIOLATION = "trustedtype-policy-violation"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventListener:
|
||||
'''
|
||||
Object event listener.
|
||||
'''
|
||||
#: ``EventListener``'s type.
|
||||
type_: str
|
||||
|
||||
#: ``EventListener``'s useCapture.
|
||||
use_capture: bool
|
||||
|
||||
#: ``EventListener``'s passive flag.
|
||||
passive: bool
|
||||
|
||||
#: ``EventListener``'s once flag.
|
||||
once: bool
|
||||
|
||||
#: Script id of the handler code.
|
||||
script_id: runtime.ScriptId
|
||||
|
||||
#: Line number in the script (0-based).
|
||||
line_number: int
|
||||
|
||||
#: Column number in the script (0-based).
|
||||
column_number: int
|
||||
|
||||
#: Event handler function value.
|
||||
handler: typing.Optional[runtime.RemoteObject] = None
|
||||
|
||||
#: Event original handler function value.
|
||||
original_handler: typing.Optional[runtime.RemoteObject] = None
|
||||
|
||||
#: Node the listener is added to (if any).
|
||||
backend_node_id: typing.Optional[dom.BackendNodeId] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['type'] = self.type_
|
||||
json['useCapture'] = self.use_capture
|
||||
json['passive'] = self.passive
|
||||
json['once'] = self.once
|
||||
json['scriptId'] = self.script_id.to_json()
|
||||
json['lineNumber'] = self.line_number
|
||||
json['columnNumber'] = self.column_number
|
||||
if self.handler is not None:
|
||||
json['handler'] = self.handler.to_json()
|
||||
if self.original_handler is not None:
|
||||
json['originalHandler'] = self.original_handler.to_json()
|
||||
if self.backend_node_id is not None:
|
||||
json['backendNodeId'] = self.backend_node_id.to_json()
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
type_=str(json['type']),
|
||||
use_capture=bool(json['useCapture']),
|
||||
passive=bool(json['passive']),
|
||||
once=bool(json['once']),
|
||||
script_id=runtime.ScriptId.from_json(json['scriptId']),
|
||||
line_number=int(json['lineNumber']),
|
||||
column_number=int(json['columnNumber']),
|
||||
handler=runtime.RemoteObject.from_json(json['handler']) if 'handler' in json else None,
|
||||
original_handler=runtime.RemoteObject.from_json(json['originalHandler']) if 'originalHandler' in json else None,
|
||||
backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
|
||||
)
|
||||
|
||||
|
||||
def get_event_listeners(
|
||||
object_id: runtime.RemoteObjectId,
|
||||
depth: typing.Optional[int] = None,
|
||||
pierce: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[EventListener]]:
|
||||
'''
|
||||
Returns event listeners of the given object.
|
||||
|
||||
:param object_id: Identifier of the object to return listeners for.
|
||||
:param depth: *(Optional)* The maximum depth at which Node children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0.
|
||||
:param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). Reports listeners for all contexts if pierce is enabled.
|
||||
:returns: Array of relevant listeners.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['objectId'] = object_id.to_json()
|
||||
if depth is not None:
|
||||
params['depth'] = depth
|
||||
if pierce is not None:
|
||||
params['pierce'] = pierce
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.getEventListeners',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [EventListener.from_json(i) for i in json['listeners']]
|
||||
|
||||
|
||||
def remove_dom_breakpoint(
|
||||
node_id: dom.NodeId,
|
||||
type_: DOMBreakpointType
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes DOM breakpoint that was set using ``setDOMBreakpoint``.
|
||||
|
||||
:param node_id: Identifier of the node to remove breakpoint from.
|
||||
:param type_: Type of the breakpoint to remove.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['nodeId'] = node_id.to_json()
|
||||
params['type'] = type_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeDOMBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_event_listener_breakpoint(
|
||||
event_name: str,
|
||||
target_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular DOM event.
|
||||
|
||||
:param event_name: Event name.
|
||||
:param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
if target_name is not None:
|
||||
params['targetName'] = target_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeEventListenerBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint on particular native event.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def remove_xhr_breakpoint(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Removes breakpoint from XMLHttpRequest.
|
||||
|
||||
:param url: Resource URL substring.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.removeXHRBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_break_on_csp_violation(
|
||||
violation_types: typing.List[CSPViolationType]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular CSP violations.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param violation_types: CSP Violations to stop upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['violationTypes'] = [i.to_json() for i in violation_types]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setBreakOnCSPViolation',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_dom_breakpoint(
|
||||
node_id: dom.NodeId,
|
||||
type_: DOMBreakpointType
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular operation with DOM.
|
||||
|
||||
:param node_id: Identifier of the node to set breakpoint on.
|
||||
:param type_: Type of the operation to stop upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['nodeId'] = node_id.to_json()
|
||||
params['type'] = type_.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setDOMBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_event_listener_breakpoint(
|
||||
event_name: str,
|
||||
target_name: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular DOM event.
|
||||
|
||||
:param event_name: DOM Event name to stop on (any DOM event will do).
|
||||
:param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name to stop on. If equal to ```"*"``` or not provided, will stop on any EventTarget.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
if target_name is not None:
|
||||
params['targetName'] = target_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setEventListenerBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_instrumentation_breakpoint(
|
||||
event_name: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on particular native event.
|
||||
|
||||
**EXPERIMENTAL**
|
||||
|
||||
:param event_name: Instrumentation name to stop on.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['eventName'] = event_name
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setInstrumentationBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def set_xhr_breakpoint(
|
||||
url: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Sets breakpoint on XMLHttpRequest.
|
||||
|
||||
:param url: Resource URL substring. All XHRs having this substring in the URL will get stopped upon.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'DOMDebugger.setXHRBreakpoint',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
@ -0,0 +1,870 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMSnapshot (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import dom_debugger
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
|
||||
class DOMNode:
|
||||
'''
|
||||
A Node in the DOM tree.
|
||||
'''
|
||||
#: ``Node``'s nodeType.
|
||||
node_type: int
|
||||
|
||||
#: ``Node``'s nodeName.
|
||||
node_name: str
|
||||
|
||||
#: ``Node``'s nodeValue.
|
||||
node_value: str
|
||||
|
||||
#: ``Node``'s id, corresponds to DOM.Node.backendNodeId.
|
||||
backend_node_id: dom.BackendNodeId
|
||||
|
||||
#: Only set for textarea elements, contains the text value.
|
||||
text_value: typing.Optional[str] = None
|
||||
|
||||
#: Only set for input elements, contains the input's associated text value.
|
||||
input_value: typing.Optional[str] = None
|
||||
|
||||
#: Only set for radio and checkbox input elements, indicates if the element has been checked
|
||||
input_checked: typing.Optional[bool] = None
|
||||
|
||||
#: Only set for option elements, indicates if the element has been selected
|
||||
option_selected: typing.Optional[bool] = None
|
||||
|
||||
#: The indexes of the node's child nodes in the ``domNodes`` array returned by ``getSnapshot``, if
|
||||
#: any.
|
||||
child_node_indexes: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Attributes of an ``Element`` node.
|
||||
attributes: typing.Optional[typing.List[NameValue]] = None
|
||||
|
||||
#: Indexes of pseudo elements associated with this node in the ``domNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
pseudo_element_indexes: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The index of the node's related layout tree node in the ``layoutTreeNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
layout_node_index: typing.Optional[int] = None
|
||||
|
||||
#: Document URL that ``Document`` or ``FrameOwner`` node points to.
|
||||
document_url: typing.Optional[str] = None
|
||||
|
||||
#: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion.
|
||||
base_url: typing.Optional[str] = None
|
||||
|
||||
#: Only set for documents, contains the document's content language.
|
||||
content_language: typing.Optional[str] = None
|
||||
|
||||
#: Only set for documents, contains the document's character set encoding.
|
||||
document_encoding: typing.Optional[str] = None
|
||||
|
||||
#: ``DocumentType`` node's publicId.
|
||||
public_id: typing.Optional[str] = None
|
||||
|
||||
#: ``DocumentType`` node's systemId.
|
||||
system_id: typing.Optional[str] = None
|
||||
|
||||
#: Frame ID for frame owner elements and also for the document node.
|
||||
frame_id: typing.Optional[page.FrameId] = None
|
||||
|
||||
#: The index of a frame owner element's content document in the ``domNodes`` array returned by
|
||||
#: ``getSnapshot``, if any.
|
||||
content_document_index: typing.Optional[int] = None
|
||||
|
||||
#: Type of a pseudo element node.
|
||||
pseudo_type: typing.Optional[dom.PseudoType] = None
|
||||
|
||||
#: Shadow root type.
|
||||
shadow_root_type: typing.Optional[dom.ShadowRootType] = None
|
||||
|
||||
#: Whether this DOM node responds to mouse clicks. This includes nodes that have had click
|
||||
#: event listeners attached via JavaScript as well as anchor tags that naturally navigate when
|
||||
#: clicked.
|
||||
is_clickable: typing.Optional[bool] = None
|
||||
|
||||
#: Details of the node's event listeners, if any.
|
||||
event_listeners: typing.Optional[typing.List[dom_debugger.EventListener]] = None
|
||||
|
||||
#: The selected url for nodes with a srcset attribute.
|
||||
current_source_url: typing.Optional[str] = None
|
||||
|
||||
#: The url of the script (if any) that generates this node.
|
||||
origin_url: typing.Optional[str] = None
|
||||
|
||||
#: Scroll offsets, set when this node is a Document.
|
||||
scroll_offset_x: typing.Optional[float] = None
|
||||
|
||||
scroll_offset_y: typing.Optional[float] = None
|
||||
|
||||
def to_json(self):
    '''Serialize this DOM node to a CDP JSON dict.

    Required fields are always emitted; optional fields are included only
    when they are not ``None``.  Keys use the camelCase names of the CDP
    wire format (e.g. ``base_url`` -> ``baseURL``).
    '''
    json = dict()
    json['nodeType'] = self.node_type
    json['nodeName'] = self.node_name
    json['nodeValue'] = self.node_value
    json['backendNodeId'] = self.backend_node_id.to_json()
    if self.text_value is not None:
        json['textValue'] = self.text_value
    if self.input_value is not None:
        json['inputValue'] = self.input_value
    if self.input_checked is not None:
        json['inputChecked'] = self.input_checked
    if self.option_selected is not None:
        json['optionSelected'] = self.option_selected
    if self.child_node_indexes is not None:
        json['childNodeIndexes'] = [i for i in self.child_node_indexes]
    if self.attributes is not None:
        json['attributes'] = [i.to_json() for i in self.attributes]
    if self.pseudo_element_indexes is not None:
        json['pseudoElementIndexes'] = [i for i in self.pseudo_element_indexes]
    if self.layout_node_index is not None:
        json['layoutNodeIndex'] = self.layout_node_index
    if self.document_url is not None:
        json['documentURL'] = self.document_url
    if self.base_url is not None:
        json['baseURL'] = self.base_url
    if self.content_language is not None:
        json['contentLanguage'] = self.content_language
    if self.document_encoding is not None:
        json['documentEncoding'] = self.document_encoding
    if self.public_id is not None:
        json['publicId'] = self.public_id
    if self.system_id is not None:
        json['systemId'] = self.system_id
    if self.frame_id is not None:
        json['frameId'] = self.frame_id.to_json()
    if self.content_document_index is not None:
        json['contentDocumentIndex'] = self.content_document_index
    if self.pseudo_type is not None:
        json['pseudoType'] = self.pseudo_type.to_json()
    if self.shadow_root_type is not None:
        json['shadowRootType'] = self.shadow_root_type.to_json()
    if self.is_clickable is not None:
        json['isClickable'] = self.is_clickable
    if self.event_listeners is not None:
        json['eventListeners'] = [i.to_json() for i in self.event_listeners]
    if self.current_source_url is not None:
        json['currentSourceURL'] = self.current_source_url
    if self.origin_url is not None:
        json['originURL'] = self.origin_url
    if self.scroll_offset_x is not None:
        json['scrollOffsetX'] = self.scroll_offset_x
    if self.scroll_offset_y is not None:
        json['scrollOffsetY'] = self.scroll_offset_y
    return json
@classmethod
def from_json(cls, json):
    '''Deserialize a ``DOMNode`` from a CDP JSON dict.

    Optional keys absent from ``json`` become ``None`` on the returned
    instance; nested CDP types are decoded via their own ``from_json``.
    '''
    return cls(
        node_type=int(json['nodeType']),
        node_name=str(json['nodeName']),
        node_value=str(json['nodeValue']),
        backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']),
        text_value=str(json['textValue']) if 'textValue' in json else None,
        input_value=str(json['inputValue']) if 'inputValue' in json else None,
        input_checked=bool(json['inputChecked']) if 'inputChecked' in json else None,
        option_selected=bool(json['optionSelected']) if 'optionSelected' in json else None,
        child_node_indexes=[int(i) for i in json['childNodeIndexes']] if 'childNodeIndexes' in json else None,
        attributes=[NameValue.from_json(i) for i in json['attributes']] if 'attributes' in json else None,
        pseudo_element_indexes=[int(i) for i in json['pseudoElementIndexes']] if 'pseudoElementIndexes' in json else None,
        layout_node_index=int(json['layoutNodeIndex']) if 'layoutNodeIndex' in json else None,
        document_url=str(json['documentURL']) if 'documentURL' in json else None,
        base_url=str(json['baseURL']) if 'baseURL' in json else None,
        content_language=str(json['contentLanguage']) if 'contentLanguage' in json else None,
        document_encoding=str(json['documentEncoding']) if 'documentEncoding' in json else None,
        public_id=str(json['publicId']) if 'publicId' in json else None,
        system_id=str(json['systemId']) if 'systemId' in json else None,
        frame_id=page.FrameId.from_json(json['frameId']) if 'frameId' in json else None,
        content_document_index=int(json['contentDocumentIndex']) if 'contentDocumentIndex' in json else None,
        pseudo_type=dom.PseudoType.from_json(json['pseudoType']) if 'pseudoType' in json else None,
        shadow_root_type=dom.ShadowRootType.from_json(json['shadowRootType']) if 'shadowRootType' in json else None,
        is_clickable=bool(json['isClickable']) if 'isClickable' in json else None,
        event_listeners=[dom_debugger.EventListener.from_json(i) for i in json['eventListeners']] if 'eventListeners' in json else None,
        current_source_url=str(json['currentSourceURL']) if 'currentSourceURL' in json else None,
        origin_url=str(json['originURL']) if 'originURL' in json else None,
        scroll_offset_x=float(json['scrollOffsetX']) if 'scrollOffsetX' in json else None,
        scroll_offset_y=float(json['scrollOffsetY']) if 'scrollOffsetY' in json else None,
    )
@dataclass
class InlineTextBox:
    '''
    Details of post layout rendered text positions. The exact layout should not be regarded as
    stable and may change between versions.
    '''
    #: The bounding box in document coordinates. Note that scroll offset of the document is ignored.
    bounding_box: dom.Rect

    #: The starting index in characters, for this post layout textbox substring. Characters that
    #: would be represented as a surrogate pair in UTF-16 have length 2.
    start_character_index: int

    #: The number of characters in this post layout textbox substring. Characters that would be
    #: represented as a surrogate pair in UTF-16 have length 2.
    num_characters: int

    def to_json(self):
        '''Serialize to a CDP JSON dict with camelCase keys.'''
        return {
            'boundingBox': self.bounding_box.to_json(),
            'startCharacterIndex': self.start_character_index,
            'numCharacters': self.num_characters,
        }

    @classmethod
    def from_json(cls, json):
        '''Build an ``InlineTextBox`` from a CDP JSON dict.'''
        return cls(
            bounding_box=dom.Rect.from_json(json['boundingBox']),
            start_character_index=int(json['startCharacterIndex']),
            num_characters=int(json['numCharacters']),
        )
@dataclass
class LayoutTreeNode:
    '''
    Details of an element in the DOM tree with a LayoutObject.
    '''
    #: The index of the related DOM node in the ``domNodes`` array returned by ``getSnapshot``.
    dom_node_index: int

    #: The bounding box in document coordinates. Note that scroll offset of the document is ignored.
    bounding_box: dom.Rect

    #: Contents of the LayoutText, if any.
    layout_text: typing.Optional[str] = None

    #: The post-layout inline text nodes, if any.
    inline_text_nodes: typing.Optional[typing.List[InlineTextBox]] = None

    #: Index into the ``computedStyles`` array returned by ``getSnapshot``.
    style_index: typing.Optional[int] = None

    #: Global paint order index, which is determined by the stacking order of the nodes. Nodes
    #: that are painted together will have the same index. Only provided if includePaintOrder in
    #: getSnapshot was true.
    paint_order: typing.Optional[int] = None

    #: Set to true to indicate the element begins a new stacking context.
    is_stacking_context: typing.Optional[bool] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are emitted only when set.'''
        json = dict()
        json['domNodeIndex'] = self.dom_node_index
        json['boundingBox'] = self.bounding_box.to_json()
        if self.layout_text is not None:
            json['layoutText'] = self.layout_text
        if self.inline_text_nodes is not None:
            json['inlineTextNodes'] = [i.to_json() for i in self.inline_text_nodes]
        if self.style_index is not None:
            json['styleIndex'] = self.style_index
        if self.paint_order is not None:
            json['paintOrder'] = self.paint_order
        if self.is_stacking_context is not None:
            json['isStackingContext'] = self.is_stacking_context
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            dom_node_index=int(json['domNodeIndex']),
            bounding_box=dom.Rect.from_json(json['boundingBox']),
            layout_text=str(json['layoutText']) if 'layoutText' in json else None,
            inline_text_nodes=[InlineTextBox.from_json(i) for i in json['inlineTextNodes']] if 'inlineTextNodes' in json else None,
            style_index=int(json['styleIndex']) if 'styleIndex' in json else None,
            paint_order=int(json['paintOrder']) if 'paintOrder' in json else None,
            is_stacking_context=bool(json['isStackingContext']) if 'isStackingContext' in json else None,
        )
@dataclass
class ComputedStyle:
    '''
    A subset of the full ComputedStyle as defined by the request whitelist.
    '''
    #: Name/value pairs of computed style properties.
    properties: typing.List[NameValue]

    def to_json(self):
        '''Serialize to a CDP JSON dict.'''
        return {'properties': [prop.to_json() for prop in self.properties]}

    @classmethod
    def from_json(cls, json):
        '''Build a ``ComputedStyle`` from a CDP JSON dict.'''
        return cls(
            properties=[NameValue.from_json(prop) for prop in json['properties']],
        )
@dataclass
class NameValue:
    '''
    A name/value pair.
    '''
    #: Attribute/property name.
    name: str

    #: Attribute/property value.
    value: str

    def to_json(self):
        '''Serialize to a CDP JSON dict.'''
        return {'name': self.name, 'value': self.value}

    @classmethod
    def from_json(cls, json):
        '''Build a ``NameValue`` from a CDP JSON dict.'''
        return cls(name=str(json['name']), value=str(json['value']))
class StringIndex(int):
    '''
    Index of the string in the strings table.
    '''
    def to_json(self) -> int:
        # Serializes as the plain integer it already is.
        return self

    @classmethod
    def from_json(cls, json: int) -> StringIndex:
        return cls(json)

    def __repr__(self):
        return f'StringIndex({super().__repr__()})'
class ArrayOfStrings(list):
    '''
    A list of ``StringIndex`` entries, i.e. indexes into the strings table.
    '''
    def to_json(self) -> typing.List[StringIndex]:
        # Serializes as the plain list it already is.
        return self

    @classmethod
    def from_json(cls, json: typing.List[StringIndex]) -> ArrayOfStrings:
        return cls(json)

    def __repr__(self):
        return f'ArrayOfStrings({super().__repr__()})'
@dataclass
class RareStringData:
    '''
    Data that is only present on rare nodes.
    '''
    #: Node indexes.
    index: typing.List[int]

    #: String-table indexes (presumably parallel to ``index`` — confirm against the CDP spec).
    value: typing.List[StringIndex]

    def to_json(self):
        '''Serialize to a CDP JSON dict.'''
        return {
            'index': list(self.index),
            'value': [entry.to_json() for entry in self.value],
        }

    @classmethod
    def from_json(cls, json):
        '''Build a ``RareStringData`` from a CDP JSON dict.'''
        return cls(
            index=[int(entry) for entry in json['index']],
            value=[StringIndex.from_json(entry) for entry in json['value']],
        )
@dataclass
class RareBooleanData:
    '''
    Sparse boolean data for rare nodes, stored as the list of node indexes it
    applies to.
    '''
    #: Node indexes.
    index: typing.List[int]

    def to_json(self):
        '''Serialize to a CDP JSON dict.'''
        return {'index': list(self.index)}

    @classmethod
    def from_json(cls, json):
        '''Build a ``RareBooleanData`` from a CDP JSON dict.'''
        return cls(index=[int(entry) for entry in json['index']])
@dataclass
class RareIntegerData:
    '''
    Sparse integer data for rare nodes.
    '''
    #: Node indexes.
    index: typing.List[int]

    #: Integer values (presumably parallel to ``index`` — confirm against the CDP spec).
    value: typing.List[int]

    def to_json(self):
        '''Serialize to a CDP JSON dict.'''
        return {'index': list(self.index), 'value': list(self.value)}

    @classmethod
    def from_json(cls, json):
        '''Build a ``RareIntegerData`` from a CDP JSON dict.'''
        return cls(
            index=[int(entry) for entry in json['index']],
            value=[int(entry) for entry in json['value']],
        )
class Rectangle(list):
    '''
    A rectangle encoded as a flat list of numbers (see the CDP DOMSnapshot
    spec for the exact component order).
    '''
    def to_json(self) -> typing.List[float]:
        # Serializes as the plain list it already is.
        return self

    @classmethod
    def from_json(cls, json: typing.List[float]) -> Rectangle:
        return cls(json)

    def __repr__(self):
        return f'Rectangle({super().__repr__()})'
@dataclass
class DocumentSnapshot:
    '''
    Document snapshot.
    '''
    #: Document URL that ``Document`` or ``FrameOwner`` node points to.
    document_url: StringIndex

    #: Document title.
    title: StringIndex

    #: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion.
    base_url: StringIndex

    #: Contains the document's content language.
    content_language: StringIndex

    #: Contains the document's character set encoding.
    encoding_name: StringIndex

    #: ``DocumentType`` node's publicId.
    public_id: StringIndex

    #: ``DocumentType`` node's systemId.
    system_id: StringIndex

    #: Frame ID for frame owner elements and also for the document node.
    frame_id: StringIndex

    #: A table with dom nodes.
    nodes: NodeTreeSnapshot

    #: The nodes in the layout tree.
    layout: LayoutTreeSnapshot

    #: The post-layout inline text nodes.
    text_boxes: TextBoxSnapshot

    #: Horizontal scroll offset.
    scroll_offset_x: typing.Optional[float] = None

    #: Vertical scroll offset.
    scroll_offset_y: typing.Optional[float] = None

    #: Document content width.
    content_width: typing.Optional[float] = None

    #: Document content height.
    content_height: typing.Optional[float] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are emitted only when set.'''
        json = dict()
        json['documentURL'] = self.document_url.to_json()
        json['title'] = self.title.to_json()
        json['baseURL'] = self.base_url.to_json()
        json['contentLanguage'] = self.content_language.to_json()
        json['encodingName'] = self.encoding_name.to_json()
        json['publicId'] = self.public_id.to_json()
        json['systemId'] = self.system_id.to_json()
        json['frameId'] = self.frame_id.to_json()
        json['nodes'] = self.nodes.to_json()
        json['layout'] = self.layout.to_json()
        json['textBoxes'] = self.text_boxes.to_json()
        if self.scroll_offset_x is not None:
            json['scrollOffsetX'] = self.scroll_offset_x
        if self.scroll_offset_y is not None:
            json['scrollOffsetY'] = self.scroll_offset_y
        if self.content_width is not None:
            json['contentWidth'] = self.content_width
        if self.content_height is not None:
            json['contentHeight'] = self.content_height
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            document_url=StringIndex.from_json(json['documentURL']),
            title=StringIndex.from_json(json['title']),
            base_url=StringIndex.from_json(json['baseURL']),
            content_language=StringIndex.from_json(json['contentLanguage']),
            encoding_name=StringIndex.from_json(json['encodingName']),
            public_id=StringIndex.from_json(json['publicId']),
            system_id=StringIndex.from_json(json['systemId']),
            frame_id=StringIndex.from_json(json['frameId']),
            nodes=NodeTreeSnapshot.from_json(json['nodes']),
            layout=LayoutTreeSnapshot.from_json(json['layout']),
            text_boxes=TextBoxSnapshot.from_json(json['textBoxes']),
            scroll_offset_x=float(json['scrollOffsetX']) if 'scrollOffsetX' in json else None,
            scroll_offset_y=float(json['scrollOffsetY']) if 'scrollOffsetY' in json else None,
            content_width=float(json['contentWidth']) if 'contentWidth' in json else None,
            content_height=float(json['contentHeight']) if 'contentHeight' in json else None,
        )
@dataclass
class NodeTreeSnapshot:
    '''
    Table containing nodes.

    Stored column-wise: each field is a parallel array (or rare-data table)
    indexed by node position.  All fields are optional on the wire.
    '''
    #: Parent node index.
    parent_index: typing.Optional[typing.List[int]] = None

    #: ``Node``'s nodeType.
    node_type: typing.Optional[typing.List[int]] = None

    #: Type of the shadow root the ``Node`` is in. String values are equal to the ``ShadowRootType`` enum.
    shadow_root_type: typing.Optional[RareStringData] = None

    #: ``Node``'s nodeName.
    node_name: typing.Optional[typing.List[StringIndex]] = None

    #: ``Node``'s nodeValue.
    node_value: typing.Optional[typing.List[StringIndex]] = None

    #: ``Node``'s id, corresponds to DOM.Node.backendNodeId.
    backend_node_id: typing.Optional[typing.List[dom.BackendNodeId]] = None

    #: Attributes of an ``Element`` node. Flatten name, value pairs.
    attributes: typing.Optional[typing.List[ArrayOfStrings]] = None

    #: Only set for textarea elements, contains the text value.
    text_value: typing.Optional[RareStringData] = None

    #: Only set for input elements, contains the input's associated text value.
    input_value: typing.Optional[RareStringData] = None

    #: Only set for radio and checkbox input elements, indicates if the element has been checked
    input_checked: typing.Optional[RareBooleanData] = None

    #: Only set for option elements, indicates if the element has been selected
    option_selected: typing.Optional[RareBooleanData] = None

    #: The index of the document in the list of the snapshot documents.
    content_document_index: typing.Optional[RareIntegerData] = None

    #: Type of a pseudo element node.
    pseudo_type: typing.Optional[RareStringData] = None

    #: Pseudo element identifier for this node. Only present if there is a
    #: valid pseudoType.
    pseudo_identifier: typing.Optional[RareStringData] = None

    #: Whether this DOM node responds to mouse clicks. This includes nodes that have had click
    #: event listeners attached via JavaScript as well as anchor tags that naturally navigate when
    #: clicked.
    is_clickable: typing.Optional[RareBooleanData] = None

    #: The selected url for nodes with a srcset attribute.
    current_source_url: typing.Optional[RareStringData] = None

    #: The url of the script (if any) that generates this node.
    origin_url: typing.Optional[RareStringData] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; only non-``None`` columns are emitted.'''
        json = dict()
        if self.parent_index is not None:
            json['parentIndex'] = [i for i in self.parent_index]
        if self.node_type is not None:
            json['nodeType'] = [i for i in self.node_type]
        if self.shadow_root_type is not None:
            json['shadowRootType'] = self.shadow_root_type.to_json()
        if self.node_name is not None:
            json['nodeName'] = [i.to_json() for i in self.node_name]
        if self.node_value is not None:
            json['nodeValue'] = [i.to_json() for i in self.node_value]
        if self.backend_node_id is not None:
            json['backendNodeId'] = [i.to_json() for i in self.backend_node_id]
        if self.attributes is not None:
            json['attributes'] = [i.to_json() for i in self.attributes]
        if self.text_value is not None:
            json['textValue'] = self.text_value.to_json()
        if self.input_value is not None:
            json['inputValue'] = self.input_value.to_json()
        if self.input_checked is not None:
            json['inputChecked'] = self.input_checked.to_json()
        if self.option_selected is not None:
            json['optionSelected'] = self.option_selected.to_json()
        if self.content_document_index is not None:
            json['contentDocumentIndex'] = self.content_document_index.to_json()
        if self.pseudo_type is not None:
            json['pseudoType'] = self.pseudo_type.to_json()
        if self.pseudo_identifier is not None:
            json['pseudoIdentifier'] = self.pseudo_identifier.to_json()
        if self.is_clickable is not None:
            json['isClickable'] = self.is_clickable.to_json()
        if self.current_source_url is not None:
            json['currentSourceURL'] = self.current_source_url.to_json()
        if self.origin_url is not None:
            json['originURL'] = self.origin_url.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent keys become ``None``.'''
        return cls(
            parent_index=[int(i) for i in json['parentIndex']] if 'parentIndex' in json else None,
            node_type=[int(i) for i in json['nodeType']] if 'nodeType' in json else None,
            shadow_root_type=RareStringData.from_json(json['shadowRootType']) if 'shadowRootType' in json else None,
            node_name=[StringIndex.from_json(i) for i in json['nodeName']] if 'nodeName' in json else None,
            node_value=[StringIndex.from_json(i) for i in json['nodeValue']] if 'nodeValue' in json else None,
            backend_node_id=[dom.BackendNodeId.from_json(i) for i in json['backendNodeId']] if 'backendNodeId' in json else None,
            attributes=[ArrayOfStrings.from_json(i) for i in json['attributes']] if 'attributes' in json else None,
            text_value=RareStringData.from_json(json['textValue']) if 'textValue' in json else None,
            input_value=RareStringData.from_json(json['inputValue']) if 'inputValue' in json else None,
            input_checked=RareBooleanData.from_json(json['inputChecked']) if 'inputChecked' in json else None,
            option_selected=RareBooleanData.from_json(json['optionSelected']) if 'optionSelected' in json else None,
            content_document_index=RareIntegerData.from_json(json['contentDocumentIndex']) if 'contentDocumentIndex' in json else None,
            pseudo_type=RareStringData.from_json(json['pseudoType']) if 'pseudoType' in json else None,
            pseudo_identifier=RareStringData.from_json(json['pseudoIdentifier']) if 'pseudoIdentifier' in json else None,
            is_clickable=RareBooleanData.from_json(json['isClickable']) if 'isClickable' in json else None,
            current_source_url=RareStringData.from_json(json['currentSourceURL']) if 'currentSourceURL' in json else None,
            origin_url=RareStringData.from_json(json['originURL']) if 'originURL' in json else None,
        )
@dataclass
class LayoutTreeSnapshot:
    '''
    Table of details of an element in the DOM tree with a LayoutObject.
    '''
    #: Index of the corresponding node in the ``NodeTreeSnapshot`` array returned by ``captureSnapshot``.
    node_index: typing.List[int]

    #: Array of indexes specifying computed style strings, filtered according to the ``computedStyles`` parameter passed to ``captureSnapshot``.
    styles: typing.List[ArrayOfStrings]

    #: The absolute position bounding box.
    bounds: typing.List[Rectangle]

    #: Contents of the LayoutText, if any.
    text: typing.List[StringIndex]

    #: Stacking context information.
    stacking_contexts: RareBooleanData

    #: Global paint order index, which is determined by the stacking order of the nodes. Nodes
    #: that are painted together will have the same index. Only provided if includePaintOrder in
    #: captureSnapshot was true.
    paint_orders: typing.Optional[typing.List[int]] = None

    #: The offset rect of nodes. Only available when includeDOMRects is set to true
    offset_rects: typing.Optional[typing.List[Rectangle]] = None

    #: The scroll rect of nodes. Only available when includeDOMRects is set to true
    scroll_rects: typing.Optional[typing.List[Rectangle]] = None

    #: The client rect of nodes. Only available when includeDOMRects is set to true
    client_rects: typing.Optional[typing.List[Rectangle]] = None

    #: The list of background colors that are blended with colors of overlapping elements.
    blended_background_colors: typing.Optional[typing.List[StringIndex]] = None

    #: The list of computed text opacities.
    text_color_opacities: typing.Optional[typing.List[float]] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional columns are emitted only when set.'''
        json = dict()
        json['nodeIndex'] = [i for i in self.node_index]
        json['styles'] = [i.to_json() for i in self.styles]
        json['bounds'] = [i.to_json() for i in self.bounds]
        json['text'] = [i.to_json() for i in self.text]
        json['stackingContexts'] = self.stacking_contexts.to_json()
        if self.paint_orders is not None:
            json['paintOrders'] = [i for i in self.paint_orders]
        if self.offset_rects is not None:
            json['offsetRects'] = [i.to_json() for i in self.offset_rects]
        if self.scroll_rects is not None:
            json['scrollRects'] = [i.to_json() for i in self.scroll_rects]
        if self.client_rects is not None:
            json['clientRects'] = [i.to_json() for i in self.client_rects]
        if self.blended_background_colors is not None:
            json['blendedBackgroundColors'] = [i.to_json() for i in self.blended_background_colors]
        if self.text_color_opacities is not None:
            json['textColorOpacities'] = [i for i in self.text_color_opacities]
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            node_index=[int(i) for i in json['nodeIndex']],
            styles=[ArrayOfStrings.from_json(i) for i in json['styles']],
            bounds=[Rectangle.from_json(i) for i in json['bounds']],
            text=[StringIndex.from_json(i) for i in json['text']],
            stacking_contexts=RareBooleanData.from_json(json['stackingContexts']),
            paint_orders=[int(i) for i in json['paintOrders']] if 'paintOrders' in json else None,
            offset_rects=[Rectangle.from_json(i) for i in json['offsetRects']] if 'offsetRects' in json else None,
            scroll_rects=[Rectangle.from_json(i) for i in json['scrollRects']] if 'scrollRects' in json else None,
            client_rects=[Rectangle.from_json(i) for i in json['clientRects']] if 'clientRects' in json else None,
            blended_background_colors=[StringIndex.from_json(i) for i in json['blendedBackgroundColors']] if 'blendedBackgroundColors' in json else None,
            text_color_opacities=[float(i) for i in json['textColorOpacities']] if 'textColorOpacities' in json else None,
        )
@dataclass
class TextBoxSnapshot:
    '''
    Table of details of the post layout rendered text positions. The exact layout should not be regarded as
    stable and may change between versions.
    '''
    #: Index of the layout tree node that owns this box collection.
    layout_index: typing.List[int]

    #: The absolute position bounding box.
    bounds: typing.List[Rectangle]

    #: The starting index in characters, for this post layout textbox substring. Characters that
    #: would be represented as a surrogate pair in UTF-16 have length 2.
    start: typing.List[int]

    #: The number of characters in this post layout textbox substring. Characters that would be
    #: represented as a surrogate pair in UTF-16 have length 2.
    length: typing.List[int]

    def to_json(self):
        '''Serialize to a CDP JSON dict with camelCase keys.'''
        return {
            'layoutIndex': list(self.layout_index),
            'bounds': [box.to_json() for box in self.bounds],
            'start': list(self.start),
            'length': list(self.length),
        }

    @classmethod
    def from_json(cls, json):
        '''Build a ``TextBoxSnapshot`` from a CDP JSON dict.'''
        return cls(
            layout_index=[int(entry) for entry in json['layoutIndex']],
            bounds=[Rectangle.from_json(entry) for entry in json['bounds']],
            start=[int(entry) for entry in json['start']],
            length=[int(entry) for entry in json['length']],
        )
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables DOM snapshot agent for the given page.

    Generator-based CDP command: yields the command dict; the response sent
    back in carries no payload.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'DOMSnapshot.disable',
    }
    # Fix: the response was previously bound to an unused local (``json``),
    # which also shadowed the conventional ``json`` module name.
    yield cmd_dict
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables DOM snapshot agent for the given page.

    Generator-based CDP command: yields the command dict; the response sent
    back in carries no payload.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'DOMSnapshot.enable',
    }
    # Fix: the response was previously bound to an unused local (``json``),
    # which also shadowed the conventional ``json`` module name.
    yield cmd_dict
def get_snapshot(
        computed_style_whitelist: typing.List[str],
        include_event_listeners: typing.Optional[bool] = None,
        include_paint_order: typing.Optional[bool] = None,
        include_user_agent_shadow_tree: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DOMNode], typing.List[LayoutTreeNode], typing.List[ComputedStyle]]]:
    '''
    Returns a document snapshot, including the full DOM tree of the root node (including iframes,
    template contents, and imported documents) in a flattened array, as well as layout and
    white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is
    flattened.

    :param computed_style_whitelist: Whitelist of computed styles to return.
    :param include_event_listeners: *(Optional)* Whether or not to retrieve details of DOM listeners (default false).
    :param include_paint_order: *(Optional)* Whether to determine and include the paint order index of LayoutTreeNodes (default false).
    :param include_user_agent_shadow_tree: *(Optional)* Whether to include UA shadow tree in the snapshot (default false).
    :returns: A tuple with the following items:

        0. **domNodes** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document.
        1. **layoutTreeNodes** - The nodes in the layout tree.
        2. **computedStyles** - Whitelisted ComputedStyle properties for each node in the layout tree.
    '''
    params: T_JSON_DICT = dict()
    params['computedStyleWhitelist'] = list(computed_style_whitelist)
    # Optional flags are sent only when explicitly provided by the caller.
    optional_flags = (
        ('includeEventListeners', include_event_listeners),
        ('includePaintOrder', include_paint_order),
        ('includeUserAgentShadowTree', include_user_agent_shadow_tree),
    )
    for key, flag in optional_flags:
        if flag is not None:
            params[key] = flag
    response = yield {
        'method': 'DOMSnapshot.getSnapshot',
        'params': params,
    }
    return (
        [DOMNode.from_json(i) for i in response['domNodes']],
        [LayoutTreeNode.from_json(i) for i in response['layoutTreeNodes']],
        [ComputedStyle.from_json(i) for i in response['computedStyles']]
    )
def capture_snapshot(
        computed_styles: typing.List[str],
        include_paint_order: typing.Optional[bool] = None,
        include_dom_rects: typing.Optional[bool] = None,
        include_blended_background_colors: typing.Optional[bool] = None,
        include_text_color_opacities: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DocumentSnapshot], typing.List[str]]]:
    '''
    Returns a document snapshot, including the full DOM tree of the root node (including iframes,
    template contents, and imported documents) in a flattened array, as well as layout and
    white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is
    flattened.

    :param computed_styles: Whitelist of computed styles to return.
    :param include_paint_order: *(Optional)* Whether to include layout object paint orders into the snapshot.
    :param include_dom_rects: *(Optional)* Whether to include DOM rectangles (offsetRects, clientRects, scrollRects) into the snapshot
    :param include_blended_background_colors: **(EXPERIMENTAL)** *(Optional)* Whether to include blended background colors in the snapshot (default: false).
    :param include_text_color_opacities: **(EXPERIMENTAL)** *(Optional)* Whether to include text color opacity in the snapshot (default: false).
    :returns: A tuple with the following items:

        0. **documents** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document.
        1. **strings** - Shared string table that all string properties refer to with indexes.
    '''
    params: T_JSON_DICT = dict()
    # Copy the caller's sequence so later mutation cannot alter the payload;
    # list() replaces the non-idiomatic [i for i in ...] identity comprehension.
    params['computedStyles'] = list(computed_styles)
    if include_paint_order is not None:
        params['includePaintOrder'] = include_paint_order
    if include_dom_rects is not None:
        params['includeDOMRects'] = include_dom_rects
    if include_blended_background_colors is not None:
        params['includeBlendedBackgroundColors'] = include_blended_background_colors
    if include_text_color_opacities is not None:
        params['includeTextColorOpacities'] = include_text_color_opacities
    cmd_dict: T_JSON_DICT = {
        'method': 'DOMSnapshot.captureSnapshot',
        'params': params,
    }
    json = yield cmd_dict
    return (
        [DocumentSnapshot.from_json(i) for i in json['documents']],
        [str(i) for i in json['strings']]
    )
|
@ -0,0 +1,220 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: DOMStorage (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class SerializedStorageKey(str):
    '''
    Opaque key by which DOM Storage groups its cached storage areas.
    Behaves exactly like the underlying wire string.
    '''

    def to_json(self) -> str:
        '''Return the raw wire representation (the string itself).'''
        return self

    @classmethod
    def from_json(cls, json: str) -> SerializedStorageKey:
        '''Wrap a raw wire string in this type.'''
        return cls(json)

    def __repr__(self):
        return f'SerializedStorageKey({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class StorageId:
    '''
    DOM Storage identifier.
    '''
    #: Whether the storage is local storage (not session storage).
    is_local_storage: bool

    #: Security origin for the storage.
    security_origin: typing.Optional[str] = None

    #: Represents a key by which DOM Storage keys its CachedStorageAreas
    storage_key: typing.Optional[SerializedStorageKey] = None

    def to_json(self):
        '''Serialize to the CDP wire dict, omitting unset optional fields.'''
        result = {'isLocalStorage': self.is_local_storage}
        if self.security_origin is not None:
            result['securityOrigin'] = self.security_origin
        if self.storage_key is not None:
            result['storageKey'] = self.storage_key.to_json()
        return result

    @classmethod
    def from_json(cls, json):
        '''Build a StorageId from its CDP wire dict.'''
        origin = str(json['securityOrigin']) if 'securityOrigin' in json else None
        key = SerializedStorageKey.from_json(json['storageKey']) if 'storageKey' in json else None
        return cls(
            is_local_storage=bool(json['isLocalStorage']),
            security_origin=origin,
            storage_key=key,
        )
|
||||
|
||||
|
||||
class Item(list):
    '''
    DOM Storage item: a plain list (key/value pair on the wire).
    '''

    def to_json(self) -> typing.List[str]:
        '''Return the raw wire representation (the list itself).'''
        return self

    @classmethod
    def from_json(cls, json: typing.List[str]) -> Item:
        '''Wrap a raw wire list in this type.'''
        return cls(json)

    def __repr__(self):
        return f'Item({super().__repr__()})'
|
||||
|
||||
|
||||
def clear(storage_id: StorageId) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Clears the given DOM storage area.

    :param storage_id: Identifier of the storage area to clear.
    '''
    _ = yield {
        'method': 'DOMStorage.clear',
        'params': {'storageId': storage_id.to_json()},
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Disables storage tracking, prevents storage events from being sent to the client.
    '''
    _ = yield {'method': 'DOMStorage.disable'}
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Enables storage tracking, storage events will now be delivered to the client.
    '''
    _ = yield {'method': 'DOMStorage.enable'}
|
||||
|
||||
|
||||
def get_dom_storage_items(storage_id: StorageId) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Item]]:
    '''
    Fetches all entries of the given DOM storage area.

    :param storage_id: Identifier of the storage area to read.
    :returns: The stored entries.
    '''
    response = yield {
        'method': 'DOMStorage.getDOMStorageItems',
        'params': {'storageId': storage_id.to_json()},
    }
    return [Item.from_json(entry) for entry in response['entries']]
|
||||
|
||||
|
||||
def remove_dom_storage_item(
        storage_id: StorageId,
        key: str
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Removes the entry stored under ``key`` from the given DOM storage area.

    :param storage_id: Identifier of the storage area.
    :param key: Key of the entry to remove.
    '''
    payload: T_JSON_DICT = {
        'storageId': storage_id.to_json(),
        'key': key,
    }
    _ = yield {
        'method': 'DOMStorage.removeDOMStorageItem',
        'params': payload,
    }
|
||||
|
||||
|
||||
def set_dom_storage_item(
        storage_id: StorageId,
        key: str,
        value: str
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Stores ``value`` under ``key`` in the given DOM storage area.

    :param storage_id: Identifier of the storage area.
    :param key: Key to write.
    :param value: Value to store.
    '''
    payload: T_JSON_DICT = {
        'storageId': storage_id.to_json(),
        'key': key,
        'value': value,
    }
    _ = yield {
        'method': 'DOMStorage.setDOMStorageItem',
        'params': payload,
    }
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemAdded')
@dataclass
class DomStorageItemAdded:
    '''
    Event payload for ``DOMStorage.domStorageItemAdded``.
    '''
    #: Storage area the item was added to.
    storage_id: StorageId
    #: Key of the added item.
    key: str
    #: Value stored under ``key``.
    new_value: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DomStorageItemAdded:
        '''Build the event object from its CDP JSON payload.'''
        return cls(
            storage_id=StorageId.from_json(json['storageId']),
            key=str(json['key']),
            new_value=str(json['newValue'])
        )
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemRemoved')
@dataclass
class DomStorageItemRemoved:
    '''
    Event payload for ``DOMStorage.domStorageItemRemoved``.
    '''
    #: Storage area the item was removed from.
    storage_id: StorageId
    #: Key of the removed item.
    key: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DomStorageItemRemoved:
        '''Build the event object from its CDP JSON payload.'''
        return cls(
            storage_id=StorageId.from_json(json['storageId']),
            key=str(json['key'])
        )
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemUpdated')
@dataclass
class DomStorageItemUpdated:
    '''
    Event payload for ``DOMStorage.domStorageItemUpdated``.
    '''
    #: Storage area containing the item.
    storage_id: StorageId
    #: Key of the updated item.
    key: str
    #: Value before the update.
    old_value: str
    #: Value after the update.
    new_value: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DomStorageItemUpdated:
        '''Build the event object from its CDP JSON payload.'''
        return cls(
            storage_id=StorageId.from_json(json['storageId']),
            key=str(json['key']),
            old_value=str(json['oldValue']),
            new_value=str(json['newValue'])
        )
|
||||
|
||||
|
||||
@event_class('DOMStorage.domStorageItemsCleared')
@dataclass
class DomStorageItemsCleared:
    '''
    Event payload for ``DOMStorage.domStorageItemsCleared``.
    '''
    #: Storage area that was cleared.
    storage_id: StorageId

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DomStorageItemsCleared:
        '''Build the event object from its CDP JSON payload.'''
        return cls(
            storage_id=StorageId.from_json(json['storageId'])
        )
|
1195
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/emulation.py
Executable file
1195
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/emulation.py
Executable file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,54 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: EventBreakpoints (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def set_instrumentation_breakpoint(event_name: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Sets breakpoint on particular native event.

    :param event_name: Instrumentation name to stop on.
    '''
    _ = yield {
        'method': 'EventBreakpoints.setInstrumentationBreakpoint',
        'params': {'eventName': event_name},
    }
|
||||
|
||||
|
||||
def remove_instrumentation_breakpoint(event_name: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Removes breakpoint on particular native event.

    :param event_name: Instrumentation name to stop on.
    '''
    _ = yield {
        'method': 'EventBreakpoints.removeInstrumentationBreakpoint',
        'params': {'eventName': event_name},
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Removes every breakpoint previously set in this domain.
    '''
    _ = yield {'method': 'EventBreakpoints.disable'}
|
@ -0,0 +1,144 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Extensions (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class StorageArea(enum.Enum):
    '''
    Extension storage areas addressable by the Extensions domain.
    '''
    SESSION = "session"
    LOCAL = "local"
    SYNC = "sync"
    MANAGED = "managed"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> StorageArea:
        '''Return the member matching the given wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
def load_unpacked(path: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]:
    '''
    Installs an unpacked extension from the filesystem similar to
    --load-extension CLI flags. Returns extension ID once the extension
    has been installed. Available if the client is connected using the
    --remote-debugging-pipe flag and the --enable-unsafe-extension-debugging
    flag is set.

    :param path: Absolute file path.
    :returns: Extension id.
    '''
    response = yield {
        'method': 'Extensions.loadUnpacked',
        'params': {'path': path},
    }
    return str(response['id'])
|
||||
|
||||
|
||||
def get_storage_items(
        id_: str,
        storage_area: StorageArea,
        keys: typing.Optional[typing.List[str]] = None
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, dict]:
    '''
    Gets data from extension storage in the given ``storageArea``. If ``keys`` is
    specified, these are used to filter the result.

    :param id_: ID of extension.
    :param storage_area: StorageArea to retrieve data from.
    :param keys: *(Optional)* Keys to retrieve.
    :returns: The stored data.
    '''
    payload: T_JSON_DICT = {
        'id': id_,
        'storageArea': storage_area.to_json(),
    }
    if keys is not None:
        payload['keys'] = list(keys)
    response = yield {
        'method': 'Extensions.getStorageItems',
        'params': payload,
    }
    return dict(response['data'])
|
||||
|
||||
|
||||
def remove_storage_items(
        id_: str,
        storage_area: StorageArea,
        keys: typing.List[str]
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Removes ``keys`` from extension storage in the given ``storageArea``.

    :param id_: ID of extension.
    :param storage_area: StorageArea to remove data from.
    :param keys: Keys to remove.
    '''
    payload: T_JSON_DICT = {
        'id': id_,
        'storageArea': storage_area.to_json(),
        'keys': list(keys),
    }
    _ = yield {
        'method': 'Extensions.removeStorageItems',
        'params': payload,
    }
|
||||
|
||||
|
||||
def clear_storage_items(
        id_: str,
        storage_area: StorageArea
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Clears extension storage in the given ``storageArea``.

    :param id_: ID of extension.
    :param storage_area: StorageArea to clear.
    '''
    payload: T_JSON_DICT = {
        'id': id_,
        'storageArea': storage_area.to_json(),
    }
    _ = yield {
        'method': 'Extensions.clearStorageItems',
        'params': payload,
    }
|
||||
|
||||
|
||||
def set_storage_items(
        id_: str,
        storage_area: StorageArea,
        values: dict
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Sets ``values`` in extension storage in the given ``storageArea``. The provided ``values``
    will be merged with existing values in the storage area.

    :param id_: ID of extension.
    :param storage_area: StorageArea to set data in.
    :param values: Values to set.
    '''
    payload: T_JSON_DICT = {
        'id': id_,
        'storageArea': storage_area.to_json(),
        'values': values,
    }
    _ = yield {
        'method': 'Extensions.setStorageItems',
        'params': payload,
    }
|
281
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/fed_cm.py
Executable file
281
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/fed_cm.py
Executable file
@ -0,0 +1,281 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: FedCm (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class LoginState(enum.Enum):
    '''
    Whether this is a sign-up or sign-in action for this account, i.e.
    whether this account has ever been used to sign in to this RP before.
    '''
    SIGN_IN = "SignIn"
    SIGN_UP = "SignUp"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> LoginState:
        '''Return the member matching the given wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
class DialogType(enum.Enum):
    '''
    The types of FedCM dialogs.
    '''
    ACCOUNT_CHOOSER = "AccountChooser"
    AUTO_REAUTHN = "AutoReauthn"
    CONFIRM_IDP_LOGIN = "ConfirmIdpLogin"
    ERROR = "Error"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> DialogType:
        '''Return the member matching the given wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
class DialogButton(enum.Enum):
    '''
    The buttons on the FedCM dialog.
    '''
    CONFIRM_IDP_LOGIN_CONTINUE = "ConfirmIdpLoginContinue"
    ERROR_GOT_IT = "ErrorGotIt"
    ERROR_MORE_DETAILS = "ErrorMoreDetails"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> DialogButton:
        '''Return the member matching the given wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
class AccountUrlType(enum.Enum):
    '''
    The URLs that each account has
    '''
    TERMS_OF_SERVICE = "TermsOfService"
    PRIVACY_POLICY = "PrivacyPolicy"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> AccountUrlType:
        '''Return the member matching the given wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class Account:
    '''
    Corresponds to IdentityRequestAccount
    '''
    account_id: str

    email: str

    name: str

    given_name: str

    picture_url: str

    idp_config_url: str

    idp_login_url: str

    login_state: LoginState

    #: These two are only set if the loginState is signUp
    terms_of_service_url: typing.Optional[str] = None

    privacy_policy_url: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to the CDP wire dict, omitting unset optional URLs.'''
        json = dict()
        json['accountId'] = self.account_id
        json['email'] = self.email
        json['name'] = self.name
        json['givenName'] = self.given_name
        json['pictureUrl'] = self.picture_url
        json['idpConfigUrl'] = self.idp_config_url
        json['idpLoginUrl'] = self.idp_login_url
        json['loginState'] = self.login_state.to_json()
        if self.terms_of_service_url is not None:
            json['termsOfServiceUrl'] = self.terms_of_service_url
        if self.privacy_policy_url is not None:
            json['privacyPolicyUrl'] = self.privacy_policy_url
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an Account from its CDP wire dict.'''
        return cls(
            account_id=str(json['accountId']),
            email=str(json['email']),
            name=str(json['name']),
            given_name=str(json['givenName']),
            picture_url=str(json['pictureUrl']),
            idp_config_url=str(json['idpConfigUrl']),
            idp_login_url=str(json['idpLoginUrl']),
            login_state=LoginState.from_json(json['loginState']),
            terms_of_service_url=str(json['termsOfServiceUrl']) if 'termsOfServiceUrl' in json else None,
            privacy_policy_url=str(json['privacyPolicyUrl']) if 'privacyPolicyUrl' in json else None,
        )
|
||||
|
||||
|
||||
def enable(
        disable_rejection_delay: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    :param disable_rejection_delay: *(Optional)* Allows callers to disable the promise rejection delay that would normally happen, if this is unimportant to what's being tested. (step 4 of https://fedidcg.github.io/FedCM/#browser-api-rp-sign-in)
    '''
    payload: T_JSON_DICT = {}
    if disable_rejection_delay is not None:
        payload['disableRejectionDelay'] = disable_rejection_delay
    _ = yield {
        'method': 'FedCm.enable',
        'params': payload,
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables the FedCm domain.
    '''
    # Docstring added: every other generated command in this module carries one;
    # this block was the only one missing it.
    cmd_dict: T_JSON_DICT = {
        'method': 'FedCm.disable',
    }
    json = yield cmd_dict
|
||||
|
||||
|
||||
def select_account(
        dialog_id: str,
        account_index: int
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    :param dialog_id: Identifier of the open FedCM dialog.
    :param account_index: Index of the account to select.
    '''
    payload: T_JSON_DICT = {
        'dialogId': dialog_id,
        'accountIndex': account_index,
    }
    _ = yield {
        'method': 'FedCm.selectAccount',
        'params': payload,
    }
|
||||
|
||||
|
||||
def click_dialog_button(
        dialog_id: str,
        dialog_button: DialogButton
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    :param dialog_id: Identifier of the open FedCM dialog.
    :param dialog_button: Button to click.
    '''
    payload: T_JSON_DICT = {
        'dialogId': dialog_id,
        'dialogButton': dialog_button.to_json(),
    }
    _ = yield {
        'method': 'FedCm.clickDialogButton',
        'params': payload,
    }
|
||||
|
||||
|
||||
def open_url(
        dialog_id: str,
        account_index: int,
        account_url_type: AccountUrlType
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    :param dialog_id: Identifier of the open FedCM dialog.
    :param account_index: Index of the account whose URL to open.
    :param account_url_type: Which of the account's URLs to open.
    '''
    payload: T_JSON_DICT = {
        'dialogId': dialog_id,
        'accountIndex': account_index,
        'accountUrlType': account_url_type.to_json(),
    }
    _ = yield {
        'method': 'FedCm.openUrl',
        'params': payload,
    }
|
||||
|
||||
|
||||
def dismiss_dialog(
        dialog_id: str,
        trigger_cooldown: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    :param dialog_id: Identifier of the open FedCM dialog.
    :param trigger_cooldown: *(Optional)*
    '''
    payload: T_JSON_DICT = {'dialogId': dialog_id}
    if trigger_cooldown is not None:
        payload['triggerCooldown'] = trigger_cooldown
    _ = yield {
        'method': 'FedCm.dismissDialog',
        'params': payload,
    }
|
||||
|
||||
|
||||
def reset_cooldown() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Resets the cooldown time, if any, to allow the next FedCM call to show
    a dialog even if one was recently dismissed by the user.
    '''
    _ = yield {'method': 'FedCm.resetCooldown'}
|
||||
|
||||
|
||||
@event_class('FedCm.dialogShown')
@dataclass
class DialogShown:
    '''
    Event payload for ``FedCm.dialogShown``.
    '''
    #: Identifier of the dialog, used by the dialog commands.
    dialog_id: str
    #: Kind of dialog that was shown.
    dialog_type: DialogType
    #: Accounts offered in the dialog.
    accounts: typing.List[Account]
    #: These exist primarily so that the caller can verify the
    #: RP context was used appropriately.
    title: str
    subtitle: typing.Optional[str]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DialogShown:
        '''Build the event object from its CDP JSON payload.'''
        return cls(
            dialog_id=str(json['dialogId']),
            dialog_type=DialogType.from_json(json['dialogType']),
            accounts=[Account.from_json(i) for i in json['accounts']],
            title=str(json['title']),
            subtitle=str(json['subtitle']) if 'subtitle' in json else None
        )
|
||||
|
||||
|
||||
@event_class('FedCm.dialogClosed')
@dataclass
class DialogClosed:
    '''
    Triggered when a dialog is closed, either by user action, JS abort,
    or a command below.
    '''
    #: Identifier of the dialog that was closed.
    dialog_id: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DialogClosed:
        '''Build the event object from its CDP JSON payload.'''
        return cls(
            dialog_id=str(json['dialogId'])
        )
|
503
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/fetch.py
Executable file
503
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/fetch.py
Executable file
@ -0,0 +1,503 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Fetch
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import io
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
class RequestId(str):
    '''
    Unique request identifier.
    '''

    def to_json(self) -> str:
        '''Return the raw wire representation (the string itself).'''
        return self

    @classmethod
    def from_json(cls, json: str) -> RequestId:
        '''Wrap a raw wire string in this type.'''
        return cls(json)

    def __repr__(self):
        return f'RequestId({super().__repr__()})'
|
||||
|
||||
|
||||
class RequestStage(enum.Enum):
    '''
    Stages of the request to handle. Request will intercept before the request is
    sent. Response will intercept after the response is received (but before response
    body is received).
    '''
    REQUEST = "Request"
    RESPONSE = "Response"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> RequestStage:
        '''Return the member matching the given wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class RequestPattern:
    '''
    Pattern describing which requests to intercept and at which stage.
    '''
    #: Wildcards (``'*'`` -> zero or more, ``'?'`` -> exactly one) are allowed. Escape character is
    #: backslash. Omitting is equivalent to ``"*"``.
    url_pattern: typing.Optional[str] = None

    #: If set, only requests for matching resource types will be intercepted.
    resource_type: typing.Optional[network.ResourceType] = None

    #: Stage at which to begin intercepting requests. Default is Request.
    request_stage: typing.Optional[RequestStage] = None

    def to_json(self):
        '''Serialize to the CDP wire dict, omitting unset fields.'''
        result = {}
        if self.url_pattern is not None:
            result['urlPattern'] = self.url_pattern
        if self.resource_type is not None:
            result['resourceType'] = self.resource_type.to_json()
        if self.request_stage is not None:
            result['requestStage'] = self.request_stage.to_json()
        return result

    @classmethod
    def from_json(cls, json):
        '''Build a RequestPattern from its CDP wire dict.'''
        return cls(
            url_pattern=str(json['urlPattern']) if 'urlPattern' in json else None,
            resource_type=network.ResourceType.from_json(json['resourceType']) if 'resourceType' in json else None,
            request_stage=RequestStage.from_json(json['requestStage']) if 'requestStage' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class HeaderEntry:
    '''
    Response HTTP header entry
    '''
    #: Header name.
    name: str

    #: Header value.
    value: str

    def to_json(self):
        '''Serialize to the CDP wire dict.'''
        return {'name': self.name, 'value': self.value}

    @classmethod
    def from_json(cls, json):
        '''Build a HeaderEntry from its CDP wire dict.'''
        return cls(
            name=str(json['name']),
            value=str(json['value']),
        )
|
||||
|
||||
|
||||
@dataclass
class AuthChallenge:
    '''
    Authorization challenge for HTTP status code 401 or 407.
    '''
    #: Origin of the challenger.
    origin: str

    #: The authentication scheme used, such as basic or digest
    scheme: str

    #: The realm of the challenge. May be empty.
    realm: str

    #: Source of the authentication challenge.
    source: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to the CDP wire dict, omitting ``source`` when unset.'''
        result = {
            'origin': self.origin,
            'scheme': self.scheme,
            'realm': self.realm,
        }
        if self.source is not None:
            result['source'] = self.source
        return result

    @classmethod
    def from_json(cls, json):
        '''Build an AuthChallenge from its CDP wire dict.'''
        return cls(
            origin=str(json['origin']),
            scheme=str(json['scheme']),
            realm=str(json['realm']),
            source=str(json['source']) if 'source' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class AuthChallengeResponse:
    '''
    Response to an AuthChallenge.
    '''
    #: The decision on what to do in response to the authorization challenge. Default means
    #: deferring to the default behavior of the net stack, which will likely either the Cancel
    #: authentication or display a popup dialog box.
    response: str

    #: The username to provide, possibly empty. Should only be set if response is
    #: ProvideCredentials.
    username: typing.Optional[str] = None

    #: The password to provide, possibly empty. Should only be set if response is
    #: ProvideCredentials.
    password: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to the CDP wire dict, omitting unset credentials.'''
        result = {'response': self.response}
        if self.username is not None:
            result['username'] = self.username
        if self.password is not None:
            result['password'] = self.password
        return result

    @classmethod
    def from_json(cls, json):
        '''Build an AuthChallengeResponse from its CDP wire dict.'''
        return cls(
            response=str(json['response']),
            username=str(json['username']) if 'username' in json else None,
            password=str(json['password']) if 'password' in json else None,
        )
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Disables the fetch domain.
    '''
    _ = yield {'method': 'Fetch.disable'}
|
||||
|
||||
|
||||
def enable(
        patterns: typing.Optional[typing.List[RequestPattern]] = None,
        handle_auth_requests: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Enables issuing of requestPaused events. A request will be paused until client
    calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth.

    :param patterns: *(Optional)* If specified, only requests matching any of these patterns will produce fetchRequested event and will be paused until clients response. If not set, all requests will be affected.
    :param handle_auth_requests: *(Optional)* If true, authRequired events will be issued and requests will be paused expecting a call to continueWithAuth.
    '''
    payload: T_JSON_DICT = {}
    if patterns is not None:
        payload['patterns'] = [pattern.to_json() for pattern in patterns]
    if handle_auth_requests is not None:
        payload['handleAuthRequests'] = handle_auth_requests
    _ = yield {
        'method': 'Fetch.enable',
        'params': payload,
    }
|
||||
|
||||
|
||||
def fail_request(
        request_id: RequestId,
        error_reason: network.ErrorReason
    ) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
    '''
    Causes the request to fail with specified reason.

    :param request_id: An id the client received in requestPaused event.
    :param error_reason: Causes the request to fail with the given reason.
    '''
    payload: T_JSON_DICT = {
        'requestId': request_id.to_json(),
        'errorReason': error_reason.to_json(),
    }
    _ = yield {
        'method': 'Fetch.failRequest',
        'params': payload,
    }
|
||||
|
||||
|
||||
def fulfill_request(
        request_id: RequestId,
        response_code: int,
        response_headers: typing.Optional[typing.List[HeaderEntry]] = None,
        binary_response_headers: typing.Optional[str] = None,
        body: typing.Optional[str] = None,
        response_phrase: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Provides response to the request.

    :param request_id: An id the client received in requestPaused event.
    :param response_code: An HTTP response code.
    :param response_headers: *(Optional)* Response headers.
    :param binary_response_headers: *(Optional)* Alternative way of specifying response headers as a \0-separated series of name: value pairs. Prefer the above method unless you need to represent some non-UTF8 values that can't be transmitted over the protocol as text.
    :param body: *(Optional)* A response body. If absent, original response body will be used if the request is intercepted at the response stage and empty body will be used if the request is intercepted at the request stage.
    :param response_phrase: *(Optional)* A textual representation of responseCode. If absent, a standard phrase matching responseCode is used.
    '''
    params: T_JSON_DICT = dict()
    params['requestId'] = request_id.to_json()
    params['responseCode'] = response_code
    # Optional arguments are only serialized when explicitly provided.
    if response_headers is not None:
        params['responseHeaders'] = [i.to_json() for i in response_headers]
    if binary_response_headers is not None:
        params['binaryResponseHeaders'] = binary_response_headers
    if body is not None:
        params['body'] = body
    if response_phrase is not None:
        params['responsePhrase'] = response_phrase
    cmd_dict: T_JSON_DICT = {
        'method': 'Fetch.fulfillRequest',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def continue_request(
        request_id: RequestId,
        url: typing.Optional[str] = None,
        method: typing.Optional[str] = None,
        post_data: typing.Optional[str] = None,
        headers: typing.Optional[typing.List[HeaderEntry]] = None,
        intercept_response: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Continues the request, optionally modifying some of its parameters.

    :param request_id: An id the client received in requestPaused event.
    :param url: *(Optional)* If set, the request url will be modified in a way that's not observable by page.
    :param method: *(Optional)* If set, the request method is overridden.
    :param post_data: *(Optional)* If set, overrides the post data in the request.
    :param headers: *(Optional)* If set, overrides the request headers. Note that the overrides do not extend to subsequent redirect hops, if a redirect happens. Another override may be applied to a different request produced by a redirect.
    :param intercept_response: **(EXPERIMENTAL)** *(Optional)* If set, overrides response interception behavior for this request.
    '''
    params: T_JSON_DICT = dict()
    params['requestId'] = request_id.to_json()
    # Optional arguments are only serialized when explicitly provided.
    if url is not None:
        params['url'] = url
    if method is not None:
        params['method'] = method
    if post_data is not None:
        params['postData'] = post_data
    if headers is not None:
        params['headers'] = [i.to_json() for i in headers]
    if intercept_response is not None:
        params['interceptResponse'] = intercept_response
    cmd_dict: T_JSON_DICT = {
        'method': 'Fetch.continueRequest',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def continue_with_auth(
        request_id: RequestId,
        auth_challenge_response: AuthChallengeResponse
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Continues a request supplying authChallengeResponse following authRequired event.

    :param request_id: An id the client received in authRequired event.
    :param auth_challenge_response: Response to the authChallenge.
    '''
    params: T_JSON_DICT = dict()
    params['requestId'] = request_id.to_json()
    params['authChallengeResponse'] = auth_challenge_response.to_json()
    cmd_dict: T_JSON_DICT = {
        'method': 'Fetch.continueWithAuth',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def continue_response(
        request_id: RequestId,
        response_code: typing.Optional[int] = None,
        response_phrase: typing.Optional[str] = None,
        response_headers: typing.Optional[typing.List[HeaderEntry]] = None,
        binary_response_headers: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Continues loading of the paused response, optionally modifying the
    response headers. If either responseCode or headers are modified, all of them
    must be present.

    **EXPERIMENTAL**

    :param request_id: An id the client received in requestPaused event.
    :param response_code: *(Optional)* An HTTP response code. If absent, original response code will be used.
    :param response_phrase: *(Optional)* A textual representation of responseCode. If absent, a standard phrase matching responseCode is used.
    :param response_headers: *(Optional)* Response headers. If absent, original response headers will be used.
    :param binary_response_headers: *(Optional)* Alternative way of specifying response headers as a \0-separated series of name: value pairs. Prefer the above method unless you need to represent some non-UTF8 values that can't be transmitted over the protocol as text.
    '''
    params: T_JSON_DICT = dict()
    params['requestId'] = request_id.to_json()
    # Optional arguments are only serialized when explicitly provided.
    if response_code is not None:
        params['responseCode'] = response_code
    if response_phrase is not None:
        params['responsePhrase'] = response_phrase
    if response_headers is not None:
        params['responseHeaders'] = [i.to_json() for i in response_headers]
    if binary_response_headers is not None:
        params['binaryResponseHeaders'] = binary_response_headers
    cmd_dict: T_JSON_DICT = {
        'method': 'Fetch.continueResponse',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def get_response_body(
        request_id: RequestId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, bool]]:
    '''
    Causes the body of the response to be received from the server and
    returned as a single string. May only be issued for a request that
    is paused in the Response stage and is mutually exclusive with
    takeResponseBodyForInterceptionAsStream. Calling other methods that
    affect the request or disabling fetch domain before body is received
    results in an undefined behavior.
    Note that the response body is not available for redirects. Requests
    paused in the _redirect received_ state may be differentiated by
    ``responseCode`` and presence of ``location`` response header, see
    comments to ``requestPaused`` for details.

    :param request_id: Identifier for the intercepted request to get body for.
    :returns: A tuple with the following items:

        0. **body** - Response body.
        1. **base64Encoded** - True, if content was sent as base64.
    '''
    # Issue the command and wait for the browser's response payload.
    response = yield {
        'method': 'Fetch.getResponseBody',
        'params': {'requestId': request_id.to_json()},
    }
    return (
        str(response['body']),
        bool(response['base64Encoded'])
    )
|
||||
|
||||
|
||||
def take_response_body_as_stream(
        request_id: RequestId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,io.StreamHandle]:
    '''
    Returns a handle to the stream representing the response body.
    The request must be paused in the HeadersReceived stage.
    Note that after this command the request can't be continued
    as is -- client either needs to cancel it or to provide the
    response body.
    The stream only supports sequential read, IO.read will fail if the position
    is specified.
    This method is mutually exclusive with getResponseBody.
    Calling other methods that affect the request or disabling fetch
    domain before body is received results in an undefined behavior.

    :param request_id:
    :returns:
    '''
    # Issue the command and wait for the browser's response payload.
    response = yield {
        'method': 'Fetch.takeResponseBodyAsStream',
        'params': {'requestId': request_id.to_json()},
    }
    return io.StreamHandle.from_json(response['stream'])
|
||||
|
||||
|
||||
@event_class('Fetch.requestPaused')
@dataclass
class RequestPaused:
    '''
    Issued when the domain is enabled and the request URL matches the
    specified filter. The request is paused until the client responds
    with one of continueRequest, failRequest or fulfillRequest.
    The stage of the request can be determined by presence of responseErrorReason
    and responseStatusCode -- the request is at the response stage if either
    of these fields is present and in the request stage otherwise.
    Redirect responses and subsequent requests are reported similarly to regular
    responses and requests. Redirect responses may be distinguished by the value
    of ``responseStatusCode`` (which is one of 301, 302, 303, 307, 308) along with
    presence of the ``location`` header. Requests resulting from a redirect will
    have ``redirectedRequestId`` field set.
    '''
    #: Each request the page makes will have a unique id.
    request_id: RequestId
    #: The details of the request.
    request: network.Request
    #: The id of the frame that initiated the request.
    frame_id: page.FrameId
    #: How the requested resource will be used.
    resource_type: network.ResourceType
    #: Response error if intercepted at response stage.
    response_error_reason: typing.Optional[network.ErrorReason]
    #: Response code if intercepted at response stage.
    response_status_code: typing.Optional[int]
    #: Response status text if intercepted at response stage.
    response_status_text: typing.Optional[str]
    #: Response headers if intercepted at the response stage.
    response_headers: typing.Optional[typing.List[HeaderEntry]]
    #: If the intercepted request had a corresponding Network.requestWillBeSent event fired for it,
    #: then this networkId will be the same as the requestId present in the requestWillBeSent event.
    network_id: typing.Optional[network.RequestId]
    #: If the request is due to a redirect response from the server, the id of the request that
    #: has caused the redirect.
    redirected_request_id: typing.Optional[RequestId]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> RequestPaused:
        '''Deserialize a ``Fetch.requestPaused`` payload; optional fields default to ``None`` when absent from the JSON.'''
        return cls(
            request_id=RequestId.from_json(json['requestId']),
            request=network.Request.from_json(json['request']),
            frame_id=page.FrameId.from_json(json['frameId']),
            resource_type=network.ResourceType.from_json(json['resourceType']),
            response_error_reason=network.ErrorReason.from_json(json['responseErrorReason']) if 'responseErrorReason' in json else None,
            response_status_code=int(json['responseStatusCode']) if 'responseStatusCode' in json else None,
            response_status_text=str(json['responseStatusText']) if 'responseStatusText' in json else None,
            response_headers=[HeaderEntry.from_json(i) for i in json['responseHeaders']] if 'responseHeaders' in json else None,
            network_id=network.RequestId.from_json(json['networkId']) if 'networkId' in json else None,
            redirected_request_id=RequestId.from_json(json['redirectedRequestId']) if 'redirectedRequestId' in json else None
        )
|
||||
|
||||
|
||||
@event_class('Fetch.authRequired')
@dataclass
class AuthRequired:
    '''
    Issued when the domain is enabled with handleAuthRequests set to true.
    The request is paused until client responds with continueWithAuth.
    '''
    #: Each request the page makes will have a unique id.
    request_id: RequestId
    #: The details of the request.
    request: network.Request
    #: The id of the frame that initiated the request.
    frame_id: page.FrameId
    #: How the requested resource will be used.
    resource_type: network.ResourceType
    #: Details of the Authorization Challenge encountered.
    #: If this is set, client should respond with continueRequest that
    #: contains AuthChallengeResponse.
    auth_challenge: AuthChallenge

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AuthRequired:
        '''Deserialize a ``Fetch.authRequired`` payload from CDP JSON; all fields are required.'''
        return cls(
            request_id=RequestId.from_json(json['requestId']),
            request=network.Request.from_json(json['request']),
            frame_id=page.FrameId.from_json(json['frameId']),
            resource_type=network.ResourceType.from_json(json['resourceType']),
            auth_challenge=AuthChallenge.from_json(json['authChallenge'])
        )
|
@ -0,0 +1,113 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: FileSystem (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
from . import storage
|
||||
|
||||
|
||||
@dataclass
class File:
    #: File name.
    name: str

    #: Timestamp
    last_modified: network.TimeSinceEpoch

    #: Size in bytes
    size: float

    type_: str

    def to_json(self):
        '''Serialize this file entry to its CDP JSON representation.'''
        return {
            'name': self.name,
            'lastModified': self.last_modified.to_json(),
            'size': self.size,
            'type': self.type_,
        }

    @classmethod
    def from_json(cls, json):
        '''Build a ``File`` from its CDP JSON representation.'''
        return cls(
            name=str(json['name']),
            last_modified=network.TimeSinceEpoch.from_json(json['lastModified']),
            size=float(json['size']),
            type_=str(json['type']),
        )
|
||||
|
||||
|
||||
@dataclass
class Directory:
    name: str

    nested_directories: typing.List[str]

    #: Files that are directly nested under this directory.
    nested_files: typing.List[File]

    def to_json(self):
        '''Serialize this directory entry to its CDP JSON representation.'''
        return {
            'name': self.name,
            'nestedDirectories': list(self.nested_directories),
            'nestedFiles': [f.to_json() for f in self.nested_files],
        }

    @classmethod
    def from_json(cls, json):
        '''Build a ``Directory`` from its CDP JSON representation.'''
        return cls(
            name=str(json['name']),
            nested_directories=[str(d) for d in json['nestedDirectories']],
            nested_files=[File.from_json(f) for f in json['nestedFiles']],
        )
|
||||
|
||||
|
||||
@dataclass
class BucketFileSystemLocator:
    #: Storage key
    storage_key: storage.SerializedStorageKey

    #: Path to the directory using each path component as an array item.
    path_components: typing.List[str]

    #: Bucket name. Not passing a ``bucketName`` will retrieve the default Bucket. (https://developer.mozilla.org/en-US/docs/Web/API/Storage_API#storage_buckets)
    bucket_name: typing.Optional[str] = None

    def to_json(self):
        '''Serialize this locator to CDP JSON; ``bucketName`` is emitted only when set.'''
        result = {
            'storageKey': self.storage_key.to_json(),
            'pathComponents': list(self.path_components),
        }
        if self.bucket_name is not None:
            result['bucketName'] = self.bucket_name
        return result

    @classmethod
    def from_json(cls, json):
        '''Build a ``BucketFileSystemLocator`` from CDP JSON; ``bucketName`` defaults to None when absent.'''
        return cls(
            storage_key=storage.SerializedStorageKey.from_json(json['storageKey']),
            path_components=[str(c) for c in json['pathComponents']],
            bucket_name=str(json['bucketName']) if 'bucketName' in json else None,
        )
|
||||
|
||||
|
||||
def get_directory(
        bucket_file_system_locator: BucketFileSystemLocator
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Directory]:
    '''
    :param bucket_file_system_locator:
    :returns: Returns the directory object at the path.
    '''
    # Issue the command and wait for the browser's response payload.
    response = yield {
        'method': 'FileSystem.getDirectory',
        'params': {'bucketFileSystemLocator': bucket_file_system_locator.to_json()},
    }
    return Directory.from_json(response['directory'])
|
@ -0,0 +1,104 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: HeadlessExperimental (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
class ScreenshotParams:
    '''
    Encoding options for a screenshot.
    '''
    #: Image compression format (defaults to png).
    format_: typing.Optional[str] = None

    #: Compression quality from range [0..100] (jpeg and webp only).
    quality: typing.Optional[int] = None

    #: Optimize image encoding for speed, not for resulting size (defaults to false)
    optimize_for_speed: typing.Optional[bool] = None

    def to_json(self):
        '''Serialize to CDP JSON; unset (None) options are omitted entirely.'''
        result = {}
        for key, value in (
            ('format', self.format_),
            ('quality', self.quality),
            ('optimizeForSpeed', self.optimize_for_speed),
        ):
            if value is not None:
                result[key] = value
        return result

    @classmethod
    def from_json(cls, json):
        '''Build ``ScreenshotParams`` from CDP JSON; absent keys map to None.'''
        return cls(
            format_=str(json['format']) if 'format' in json else None,
            quality=int(json['quality']) if 'quality' in json else None,
            optimize_for_speed=bool(json['optimizeForSpeed']) if 'optimizeForSpeed' in json else None,
        )
|
||||
|
||||
|
||||
def begin_frame(
        frame_time_ticks: typing.Optional[float] = None,
        interval: typing.Optional[float] = None,
        no_display_updates: typing.Optional[bool] = None,
        screenshot: typing.Optional[ScreenshotParams] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[bool, typing.Optional[str]]]:
    '''
    Sends a BeginFrame to the target and returns when the frame was completed. Optionally captures a
    screenshot from the resulting frame. Requires that the target was created with enabled
    BeginFrameControl. Designed for use with --run-all-compositor-stages-before-draw, see also
    https://goo.gle/chrome-headless-rendering for more background.

    :param frame_time_ticks: *(Optional)* Timestamp of this BeginFrame in Renderer TimeTicks (milliseconds of uptime). If not set, the current time will be used.
    :param interval: *(Optional)* The interval between BeginFrames that is reported to the compositor, in milliseconds. Defaults to a 60 frames/second interval, i.e. about 16.666 milliseconds.
    :param no_display_updates: *(Optional)* Whether updates should not be committed and drawn onto the display. False by default. If true, only side effects of the BeginFrame will be run, such as layout and animations, but any visual updates may not be visible on the display or in screenshots.
    :param screenshot: *(Optional)* If set, a screenshot of the frame will be captured and returned in the response. Otherwise, no screenshot will be captured. Note that capturing a screenshot can fail, for example, during renderer initialization. In such a case, no screenshot data will be returned.
    :returns: A tuple with the following items:

        0. **hasDamage** - Whether the BeginFrame resulted in damage and, thus, a new frame was committed to the display. Reported for diagnostic uses, may be removed in the future.
        1. **screenshotData** - *(Optional)* Base64-encoded image data of the screenshot, if one was requested and successfully taken.
    '''
    params: T_JSON_DICT = {}
    # Only explicitly-provided options are sent over the wire.
    if frame_time_ticks is not None:
        params['frameTimeTicks'] = frame_time_ticks
    if interval is not None:
        params['interval'] = interval
    if no_display_updates is not None:
        params['noDisplayUpdates'] = no_display_updates
    if screenshot is not None:
        params['screenshot'] = screenshot.to_json()
    response = yield {
        'method': 'HeadlessExperimental.beginFrame',
        'params': params,
    }
    # screenshotData is absent when no screenshot was requested or it failed.
    screenshot_data = str(response['screenshotData']) if 'screenshotData' in response else None
    return (bool(response['hasDamage']), screenshot_data)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables headless events for the target.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'HeadlessExperimental.disable',
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables headless events for the target.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'HeadlessExperimental.enable',
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
@ -0,0 +1,395 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: HeapProfiler (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import runtime
|
||||
|
||||
|
||||
class HeapSnapshotObjectId(str):
    '''
    Heap snapshot object id.

    A thin ``str`` subclass used purely for type clarity in the protocol API.
    '''
    def to_json(self) -> str:
        # The wire format is the plain string itself.
        return self

    @classmethod
    def from_json(cls, json: str) -> HeapSnapshotObjectId:
        return cls(json)

    def __repr__(self):
        return f'HeapSnapshotObjectId({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class SamplingHeapProfileNode:
    '''
    Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.
    '''
    #: Function location.
    call_frame: runtime.CallFrame

    #: Allocations size in bytes for the node excluding children.
    self_size: float

    #: Node id. Ids are unique across all profiles collected between startSampling and stopSampling.
    id_: int

    #: Child nodes.
    children: typing.List[SamplingHeapProfileNode]

    def to_json(self):
        '''Serialize this node (and its subtree, recursively) to CDP JSON.'''
        return {
            'callFrame': self.call_frame.to_json(),
            'selfSize': self.self_size,
            'id': self.id_,
            'children': [child.to_json() for child in self.children],
        }

    @classmethod
    def from_json(cls, json):
        '''Build a node (and its subtree, recursively) from CDP JSON.'''
        return cls(
            call_frame=runtime.CallFrame.from_json(json['callFrame']),
            self_size=float(json['selfSize']),
            id_=int(json['id']),
            children=[SamplingHeapProfileNode.from_json(child) for child in json['children']],
        )
|
||||
|
||||
|
||||
@dataclass
class SamplingHeapProfileSample:
    '''
    A single sample from a sampling profile.
    '''
    #: Allocation size in bytes attributed to the sample.
    size: float

    #: Id of the corresponding profile tree node.
    node_id: int

    #: Time-ordered sample ordinal number. It is unique across all profiles retrieved
    #: between startSampling and stopSampling.
    ordinal: float

    def to_json(self):
        '''Serialize this sample to CDP JSON.'''
        return {
            'size': self.size,
            'nodeId': self.node_id,
            'ordinal': self.ordinal,
        }

    @classmethod
    def from_json(cls, json):
        '''Build a sample from CDP JSON; all fields are required.'''
        return cls(
            size=float(json['size']),
            node_id=int(json['nodeId']),
            ordinal=float(json['ordinal']),
        )
|
||||
|
||||
|
||||
@dataclass
class SamplingHeapProfile:
    '''
    Sampling profile.
    '''
    #: Root of the allocation call tree.
    head: SamplingHeapProfileNode

    #: Individual allocation samples.
    samples: typing.List[SamplingHeapProfileSample]

    def to_json(self):
        '''Serialize this profile to CDP JSON.'''
        return {
            'head': self.head.to_json(),
            'samples': [sample.to_json() for sample in self.samples],
        }

    @classmethod
    def from_json(cls, json):
        '''Build a profile from CDP JSON.'''
        return cls(
            head=SamplingHeapProfileNode.from_json(json['head']),
            samples=[SamplingHeapProfileSample.from_json(s) for s in json['samples']],
        )
|
||||
|
||||
|
||||
def add_inspected_heap_object(
        heap_object_id: HeapSnapshotObjectId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables console to refer to the node with given id via $x (see Command Line API for more details
    $x functions).

    :param heap_object_id: Heap snapshot object id to be accessible by means of $x command line API.
    '''
    params: T_JSON_DICT = dict()
    params['heapObjectId'] = heap_object_id.to_json()
    cmd_dict: T_JSON_DICT = {
        'method': 'HeapProfiler.addInspectedHeapObject',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def collect_garbage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Runs garbage collection in the target (``HeapProfiler.collectGarbage``).
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'HeapProfiler.collectGarbage',
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables the ``HeapProfiler`` domain.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'HeapProfiler.disable',
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables the ``HeapProfiler`` domain.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'HeapProfiler.enable',
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def get_heap_object_id(
        object_id: runtime.RemoteObjectId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,HeapSnapshotObjectId]:
    '''
    :param object_id: Identifier of the object to get heap object id for.
    :returns: Id of the heap snapshot object corresponding to the passed remote object id.
    '''
    # Issue the command and wait for the browser's response payload.
    response = yield {
        'method': 'HeapProfiler.getHeapObjectId',
        'params': {'objectId': object_id.to_json()},
    }
    return HeapSnapshotObjectId.from_json(response['heapSnapshotObjectId'])
|
||||
|
||||
|
||||
def get_object_by_heap_object_id(
        object_id: HeapSnapshotObjectId,
        object_group: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,runtime.RemoteObject]:
    '''
    :param object_id:
    :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects.
    :returns: Evaluation result.
    '''
    params: T_JSON_DICT = {'objectId': object_id.to_json()}
    # objectGroup is only sent when explicitly provided.
    if object_group is not None:
        params['objectGroup'] = object_group
    response = yield {
        'method': 'HeapProfiler.getObjectByHeapObjectId',
        'params': params,
    }
    return runtime.RemoteObject.from_json(response['result'])
|
||||
|
||||
|
||||
def get_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingHeapProfile]:
    '''
    :returns: Return the sampling profile being collected.
    '''
    # No parameters: the command dict carries only the method name.
    response = yield {'method': 'HeapProfiler.getSamplingProfile'}
    return SamplingHeapProfile.from_json(response['profile'])
|
||||
|
||||
|
||||
def start_sampling(
        sampling_interval: typing.Optional[float] = None,
        include_objects_collected_by_major_gc: typing.Optional[bool] = None,
        include_objects_collected_by_minor_gc: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    :param sampling_interval: *(Optional)* Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes.
    :param include_objects_collected_by_major_gc: *(Optional)* By default, the sampling heap profiler reports only objects which are still alive when the profile is returned via getSamplingProfile or stopSampling, which is useful for determining what functions contribute the most to steady-state memory usage. This flag instructs the sampling heap profiler to also include information about objects discarded by major GC, which will show which functions cause large temporary memory usage or long GC pauses.
    :param include_objects_collected_by_minor_gc: *(Optional)* By default, the sampling heap profiler reports only objects which are still alive when the profile is returned via getSamplingProfile or stopSampling, which is useful for determining what functions contribute the most to steady-state memory usage. This flag instructs the sampling heap profiler to also include information about objects discarded by minor GC, which is useful when tuning a latency-sensitive application for minimal GC activity.
    '''
    params: T_JSON_DICT = dict()
    # Optional arguments are only serialized when explicitly provided.
    if sampling_interval is not None:
        params['samplingInterval'] = sampling_interval
    if include_objects_collected_by_major_gc is not None:
        params['includeObjectsCollectedByMajorGC'] = include_objects_collected_by_major_gc
    if include_objects_collected_by_minor_gc is not None:
        params['includeObjectsCollectedByMinorGC'] = include_objects_collected_by_minor_gc
    cmd_dict: T_JSON_DICT = {
        'method': 'HeapProfiler.startSampling',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def start_tracking_heap_objects(
        track_allocations: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    :param track_allocations: *(Optional)*
    '''
    params: T_JSON_DICT = dict()
    if track_allocations is not None:
        params['trackAllocations'] = track_allocations
    cmd_dict: T_JSON_DICT = {
        'method': 'HeapProfiler.startTrackingHeapObjects',
        'params': params,
    }
    # The command returns no payload, so the response is not bound to a name.
    yield cmd_dict
|
||||
|
||||
|
||||
def stop_sampling() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingHeapProfile]:
    '''
    :returns: Recorded sampling heap profile.
    '''
    # No parameters: the command dict carries only the method name.
    response = yield {'method': 'HeapProfiler.stopSampling'}
    return SamplingHeapProfile.from_json(response['profile'])
|
||||
|
||||
|
||||
def stop_tracking_heap_objects(
        report_progress: typing.Optional[bool] = None,
        treat_global_objects_as_roots: typing.Optional[bool] = None,
        capture_numeric_value: typing.Optional[bool] = None,
        expose_internals: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Stop tracking heap objects; a snapshot is taken as tracking stops.

    :param report_progress: *(Optional)* If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped.
    :param treat_global_objects_as_roots: *(Optional)* Deprecated in favor of ```exposeInternals```.
    :param capture_numeric_value: *(Optional)* If true, numerical values are included in the snapshot
    :param expose_internals: **(EXPERIMENTAL)** *(Optional)* If true, exposes internals of the snapshot.
    '''
    request: T_JSON_DICT = dict()
    # Only explicitly-provided optional flags are serialized.
    if report_progress is not None:
        request['reportProgress'] = report_progress
    if treat_global_objects_as_roots is not None:
        request['treatGlobalObjectsAsRoots'] = treat_global_objects_as_roots
    if capture_numeric_value is not None:
        request['captureNumericValue'] = capture_numeric_value
    if expose_internals is not None:
        request['exposeInternals'] = expose_internals
    yield {
        'method': 'HeapProfiler.stopTrackingHeapObjects',
        'params': request,
    }
|
||||
|
||||
|
||||
def take_heap_snapshot(
        report_progress: typing.Optional[bool] = None,
        treat_global_objects_as_roots: typing.Optional[bool] = None,
        capture_numeric_value: typing.Optional[bool] = None,
        expose_internals: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Take a heap snapshot.

    :param report_progress: *(Optional)* If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.
    :param treat_global_objects_as_roots: *(Optional)* If true, a raw snapshot without artificial roots will be generated. Deprecated in favor of ```exposeInternals```.
    :param capture_numeric_value: *(Optional)* If true, numerical values are included in the snapshot
    :param expose_internals: **(EXPERIMENTAL)** *(Optional)* If true, exposes internals of the snapshot.
    '''
    request: T_JSON_DICT = dict()
    # Only explicitly-provided optional flags are serialized.
    if report_progress is not None:
        request['reportProgress'] = report_progress
    if treat_global_objects_as_roots is not None:
        request['treatGlobalObjectsAsRoots'] = treat_global_objects_as_roots
    if capture_numeric_value is not None:
        request['captureNumericValue'] = capture_numeric_value
    if expose_internals is not None:
        request['exposeInternals'] = expose_internals
    yield {
        'method': 'HeapProfiler.takeHeapSnapshot',
        'params': request,
    }
|
||||
|
||||
|
||||
@event_class('HeapProfiler.addHeapSnapshotChunk')
@dataclass
class AddHeapSnapshotChunk:
    '''Event payload for ``HeapProfiler.addHeapSnapshotChunk``.'''
    #: Raw chunk of serialized snapshot data delivered by this event.
    chunk: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AddHeapSnapshotChunk:
        return cls(chunk=str(json['chunk']))
|
||||
|
||||
|
||||
@event_class('HeapProfiler.heapStatsUpdate')
@dataclass
class HeapStatsUpdate:
    '''
    If heap objects tracking has been started then backend may send update for one or more fragments
    '''
    #: An array of triplets. Each triplet describes a fragment. The first integer is the fragment
    #: index, the second integer is a total count of objects for the fragment, the third integer is
    #: a total size of the objects for the fragment.
    stats_update: typing.List[int]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> HeapStatsUpdate:
        return cls(stats_update=[int(value) for value in json['statsUpdate']])
|
||||
|
||||
|
||||
@event_class('HeapProfiler.lastSeenObjectId')
@dataclass
class LastSeenObjectId:
    '''
    If heap objects tracking has been started then backend regularly sends a current value for last
    seen object id and corresponding timestamp. If the were changes in the heap since last event
    then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.
    '''
    #: Identifier of the most recently seen heap object.
    last_seen_object_id: int
    #: Timestamp associated with the observation.
    timestamp: float

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> LastSeenObjectId:
        return cls(
            last_seen_object_id=int(json['lastSeenObjectId']),
            timestamp=float(json['timestamp']),
        )
|
||||
|
||||
|
||||
@event_class('HeapProfiler.reportHeapSnapshotProgress')
@dataclass
class ReportHeapSnapshotProgress:
    '''Event payload for ``HeapProfiler.reportHeapSnapshotProgress``.'''
    #: Number of snapshot items processed so far.
    done: int
    #: Total number of snapshot items.
    total: int
    #: Present (and true) once snapshot serialization has finished.
    finished: typing.Optional[bool]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ReportHeapSnapshotProgress:
        return cls(
            done=int(json['done']),
            total=int(json['total']),
            finished=bool(json['finished']) if 'finished' in json else None,
        )
|
||||
|
||||
|
||||
@event_class('HeapProfiler.resetProfiles')
@dataclass
class ResetProfiles:
    '''Payload-less event for ``HeapProfiler.resetProfiles``.'''

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ResetProfiles:
        # The event carries no fields; the payload is ignored.
        return cls()
|
@ -0,0 +1,525 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: IndexedDB (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import runtime
|
||||
from . import storage
|
||||
|
||||
|
||||
@dataclass
class DatabaseWithObjectStores:
    '''
    Database with an array of object stores.
    '''
    #: Database name.
    name: str

    #: Database version (type is not 'integer', as the standard
    #: requires the version number to be 'unsigned long long')
    version: float

    #: Object stores in this database.
    object_stores: typing.List[ObjectStore]

    def to_json(self):
        payload = dict()
        payload['name'] = self.name
        payload['version'] = self.version
        payload['objectStores'] = [store.to_json() for store in self.object_stores]
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            version=float(json['version']),
            object_stores=[ObjectStore.from_json(item) for item in json['objectStores']],
        )
|
||||
|
||||
|
||||
@dataclass
class ObjectStore:
    '''
    Object store.
    '''
    #: Object store name.
    name: str

    #: Object store key path.
    key_path: KeyPath

    #: If true, object store has auto increment flag set.
    auto_increment: bool

    #: Indexes in this object store.
    indexes: typing.List[ObjectStoreIndex]

    def to_json(self):
        payload = dict()
        payload['name'] = self.name
        payload['keyPath'] = self.key_path.to_json()
        payload['autoIncrement'] = self.auto_increment
        payload['indexes'] = [index.to_json() for index in self.indexes]
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            key_path=KeyPath.from_json(json['keyPath']),
            auto_increment=bool(json['autoIncrement']),
            indexes=[ObjectStoreIndex.from_json(item) for item in json['indexes']],
        )
|
||||
|
||||
|
||||
@dataclass
class ObjectStoreIndex:
    '''
    Object store index.
    '''
    #: Index name.
    name: str

    #: Index key path.
    key_path: KeyPath

    #: If true, index is unique.
    unique: bool

    #: If true, index allows multiple entries for a key.
    multi_entry: bool

    def to_json(self):
        payload = dict()
        payload['name'] = self.name
        payload['keyPath'] = self.key_path.to_json()
        payload['unique'] = self.unique
        payload['multiEntry'] = self.multi_entry
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            key_path=KeyPath.from_json(json['keyPath']),
            unique=bool(json['unique']),
            multi_entry=bool(json['multiEntry']),
        )
|
||||
|
||||
|
||||
@dataclass
class Key:
    '''
    Key.
    '''
    #: Key type.
    type_: str

    #: Number value.
    number: typing.Optional[float] = None

    #: String value.
    string: typing.Optional[str] = None

    #: Date value.
    date: typing.Optional[float] = None

    #: Array value.
    array: typing.Optional[typing.List[Key]] = None

    def to_json(self):
        payload = dict()
        # 'type' is the only mandatory field; the rest are emitted when set.
        payload['type'] = self.type_
        if self.number is not None:
            payload['number'] = self.number
        if self.string is not None:
            payload['string'] = self.string
        if self.date is not None:
            payload['date'] = self.date
        if self.array is not None:
            payload['array'] = [element.to_json() for element in self.array]
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            type_=str(json['type']),
            number=float(json['number']) if 'number' in json else None,
            string=str(json['string']) if 'string' in json else None,
            date=float(json['date']) if 'date' in json else None,
            array=[Key.from_json(element) for element in json['array']] if 'array' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class KeyRange:
    '''
    Key range.
    '''
    #: If true lower bound is open.
    lower_open: bool

    #: If true upper bound is open.
    upper_open: bool

    #: Lower bound.
    lower: typing.Optional[Key] = None

    #: Upper bound.
    upper: typing.Optional[Key] = None

    def to_json(self):
        payload = dict()
        payload['lowerOpen'] = self.lower_open
        payload['upperOpen'] = self.upper_open
        # Unbounded ends are simply absent from the serialized form.
        if self.lower is not None:
            payload['lower'] = self.lower.to_json()
        if self.upper is not None:
            payload['upper'] = self.upper.to_json()
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            lower_open=bool(json['lowerOpen']),
            upper_open=bool(json['upperOpen']),
            lower=Key.from_json(json['lower']) if 'lower' in json else None,
            upper=Key.from_json(json['upper']) if 'upper' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class DataEntry:
    '''
    Data entry.
    '''
    #: Key object.
    key: runtime.RemoteObject

    #: Primary key object.
    primary_key: runtime.RemoteObject

    #: Value object.
    value: runtime.RemoteObject

    def to_json(self):
        payload = dict()
        payload['key'] = self.key.to_json()
        payload['primaryKey'] = self.primary_key.to_json()
        payload['value'] = self.value.to_json()
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            key=runtime.RemoteObject.from_json(json['key']),
            primary_key=runtime.RemoteObject.from_json(json['primaryKey']),
            value=runtime.RemoteObject.from_json(json['value']),
        )
|
||||
|
||||
|
||||
@dataclass
class KeyPath:
    '''
    Key path.
    '''
    #: Key path type.
    type_: str

    #: String value.
    string: typing.Optional[str] = None

    #: Array value.
    array: typing.Optional[typing.List[str]] = None

    def to_json(self):
        payload = dict()
        payload['type'] = self.type_
        if self.string is not None:
            payload['string'] = self.string
        if self.array is not None:
            payload['array'] = list(self.array)
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            type_=str(json['type']),
            string=str(json['string']) if 'string' in json else None,
            array=[str(entry) for entry in json['array']] if 'array' in json else None,
        )
|
||||
|
||||
|
||||
def clear_object_store(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None,
        database_name: str = None,
        object_store_name: str = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Clears all entries from an object store.

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :param database_name: Database name.
    :param object_store_name: Object store name.
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    request['databaseName'] = database_name
    request['objectStoreName'] = object_store_name
    yield {
        'method': 'IndexedDB.clearObjectStore',
        'params': request,
    }
|
||||
|
||||
|
||||
def delete_database(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None,
        database_name: str = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Deletes a database.

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :param database_name: Database name.
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    request['databaseName'] = database_name
    yield {
        'method': 'IndexedDB.deleteDatabase',
        'params': request,
    }
|
||||
|
||||
|
||||
def delete_object_store_entries(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None,
        database_name: str = None,
        object_store_name: str = None,
        key_range: KeyRange = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Delete a range of entries from an object store

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :param database_name:
    :param object_store_name:
    :param key_range: Range of entry keys to delete
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    request['databaseName'] = database_name
    request['objectStoreName'] = object_store_name
    # key_range is required by the protocol; a None here raises at to_json().
    request['keyRange'] = key_range.to_json()
    yield {
        'method': 'IndexedDB.deleteObjectStoreEntries',
        'params': request,
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables events from backend.
    '''
    # No parameters: the command dict carries only the method name.
    yield {
        'method': 'IndexedDB.disable',
    }
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables events from backend.
    '''
    # No parameters: the command dict carries only the method name.
    yield {
        'method': 'IndexedDB.enable',
    }
|
||||
|
||||
|
||||
def request_data(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None,
        database_name: str = None,
        object_store_name: str = None,
        index_name: str = None,
        skip_count: int = None,
        page_size: int = None,
        key_range: typing.Optional[KeyRange] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[DataEntry], bool]]:
    '''
    Requests data from object store or index.

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :param database_name: Database name.
    :param object_store_name: Object store name.
    :param index_name: Index name, empty string for object store data requests.
    :param skip_count: Number of records to skip.
    :param page_size: Number of records to fetch.
    :param key_range: *(Optional)* Key range.
    :returns: A tuple with the following items:

        0. **objectStoreDataEntries** - Array of object store data entries.
        1. **hasMore** - If true, there are more entries to fetch in the given range.
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    request['databaseName'] = database_name
    request['objectStoreName'] = object_store_name
    request['indexName'] = index_name
    request['skipCount'] = skip_count
    request['pageSize'] = page_size
    if key_range is not None:
        request['keyRange'] = key_range.to_json()
    response = yield {
        'method': 'IndexedDB.requestData',
        'params': request,
    }
    return (
        [DataEntry.from_json(entry) for entry in response['objectStoreDataEntries']],
        bool(response['hasMore'])
    )
|
||||
|
||||
|
||||
def get_metadata(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None,
        database_name: str = None,
        object_store_name: str = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[float, float]]:
    '''
    Gets metadata of an object store.

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :param database_name: Database name.
    :param object_store_name: Object store name.
    :returns: A tuple with the following items:

        0. **entriesCount** - the entries count
        1. **keyGeneratorValue** - the current value of key generator, to become the next inserted key into the object store. Valid if objectStore.autoIncrement is true.
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    request['databaseName'] = database_name
    request['objectStoreName'] = object_store_name
    response = yield {
        'method': 'IndexedDB.getMetadata',
        'params': request,
    }
    return (
        float(response['entriesCount']),
        float(response['keyGeneratorValue'])
    )
|
||||
|
||||
|
||||
def request_database(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None,
        database_name: str = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,DatabaseWithObjectStores]:
    '''
    Requests database with given name in given frame.

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :param database_name: Database name.
    :returns: Database with an array of object stores.
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    request['databaseName'] = database_name
    response = yield {
        'method': 'IndexedDB.requestDatabase',
        'params': request,
    }
    return DatabaseWithObjectStores.from_json(response['databaseWithObjectStores'])
|
||||
|
||||
|
||||
def request_database_names(
        security_origin: typing.Optional[str] = None,
        storage_key: typing.Optional[str] = None,
        storage_bucket: typing.Optional[storage.StorageBucket] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
    '''
    Requests database names for given security origin.

    :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin.
    :param storage_key: *(Optional)* Storage key.
    :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket.
    :returns: Database names for origin.
    '''
    request: T_JSON_DICT = dict()
    # Exactly one of the three scoping parameters is expected by the backend.
    if security_origin is not None:
        request['securityOrigin'] = security_origin
    if storage_key is not None:
        request['storageKey'] = storage_key
    if storage_bucket is not None:
        request['storageBucket'] = storage_bucket.to_json()
    response = yield {
        'method': 'IndexedDB.requestDatabaseNames',
        'params': request,
    }
    return [str(name) for name in response['databaseNames']]
|
699
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/input_.py
Executable file
699
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/input_.py
Executable file
@ -0,0 +1,699 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Input
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
class TouchPoint:
    #: X coordinate of the event relative to the main frame's viewport in CSS pixels.
    x: float

    #: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to
    #: the top of the viewport and Y increases as it proceeds towards the bottom of the viewport.
    y: float

    #: X radius of the touch area (default: 1.0).
    radius_x: typing.Optional[float] = None

    #: Y radius of the touch area (default: 1.0).
    radius_y: typing.Optional[float] = None

    #: Rotation angle (default: 0.0).
    rotation_angle: typing.Optional[float] = None

    #: Force (default: 1.0).
    force: typing.Optional[float] = None

    #: The normalized tangential pressure, which has a range of [-1,1] (default: 0).
    tangential_pressure: typing.Optional[float] = None

    #: The plane angle between the Y-Z plane and the plane containing both the stylus axis and the Y axis, in degrees of the range [-90,90], a positive tiltX is to the right (default: 0)
    tilt_x: typing.Optional[float] = None

    #: The plane angle between the X-Z plane and the plane containing both the stylus axis and the X axis, in degrees of the range [-90,90], a positive tiltY is towards the user (default: 0).
    tilt_y: typing.Optional[float] = None

    #: The clockwise rotation of a pen stylus around its own major axis, in degrees in the range [0,359] (default: 0).
    twist: typing.Optional[int] = None

    #: Identifier used to track touch sources between events, must be unique within an event.
    id_: typing.Optional[float] = None

    def to_json(self):
        payload = dict()
        payload['x'] = self.x
        payload['y'] = self.y
        # Emit optional fields, in protocol order, only when they were set.
        optional_fields = [
            ('radiusX', self.radius_x),
            ('radiusY', self.radius_y),
            ('rotationAngle', self.rotation_angle),
            ('force', self.force),
            ('tangentialPressure', self.tangential_pressure),
            ('tiltX', self.tilt_x),
            ('tiltY', self.tilt_y),
            ('twist', self.twist),
            ('id', self.id_),
        ]
        for key, value in optional_fields:
            if value is not None:
                payload[key] = value
        return payload

    @classmethod
    def from_json(cls, json):
        def optional(key, convert):
            # Missing optional keys map to None rather than a default value.
            return convert(json[key]) if key in json else None
        return cls(
            x=float(json['x']),
            y=float(json['y']),
            radius_x=optional('radiusX', float),
            radius_y=optional('radiusY', float),
            rotation_angle=optional('rotationAngle', float),
            force=optional('force', float),
            tangential_pressure=optional('tangentialPressure', float),
            tilt_x=optional('tiltX', float),
            tilt_y=optional('tiltY', float),
            twist=optional('twist', int),
            id_=optional('id', float),
        )
|
||||
|
||||
|
||||
class GestureSourceType(enum.Enum):
    '''Input source used to synthesize gestures.'''
    DEFAULT = "default"
    TOUCH = "touch"
    MOUSE = "mouse"

    def to_json(self):
        # Serialized form is the protocol string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        # Value-based lookup; raises ValueError on unknown strings.
        return cls(json)
|
||||
|
||||
|
||||
class MouseButton(enum.Enum):
    '''Mouse button identifier used in dispatched mouse events.'''
    NONE = "none"
    LEFT = "left"
    MIDDLE = "middle"
    RIGHT = "right"
    BACK = "back"
    FORWARD = "forward"

    def to_json(self):
        # Serialized form is the protocol string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        # Value-based lookup; raises ValueError on unknown strings.
        return cls(json)
|
||||
|
||||
|
||||
class TimeSinceEpoch(float):
    '''
    UTC time in seconds, counted from January 1, 1970.
    '''
    def to_json(self) -> float:
        # The instance IS its JSON representation (a plain float subclass).
        return self

    @classmethod
    def from_json(cls, json: float) -> TimeSinceEpoch:
        return cls(json)

    def __repr__(self):
        return f'TimeSinceEpoch({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class DragDataItem:
    #: Mime type of the dragged data.
    mime_type: str

    #: Depending of the value of ``mimeType``, it contains the dragged link,
    #: text, HTML markup or any other data.
    data: str

    #: Title associated with a link. Only valid when ``mimeType`` == "text/uri-list".
    title: typing.Optional[str] = None

    #: Stores the base URL for the contained markup. Only valid when ``mimeType``
    #: == "text/html".
    base_url: typing.Optional[str] = None

    def to_json(self):
        payload = dict()
        payload['mimeType'] = self.mime_type
        payload['data'] = self.data
        if self.title is not None:
            payload['title'] = self.title
        if self.base_url is not None:
            payload['baseURL'] = self.base_url
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            mime_type=str(json['mimeType']),
            data=str(json['data']),
            title=str(json['title']) if 'title' in json else None,
            base_url=str(json['baseURL']) if 'baseURL' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class DragData:
    #: Items being dragged.
    items: typing.List[DragDataItem]

    #: Bit field representing allowed drag operations. Copy = 1, Link = 2, Move = 16
    drag_operations_mask: int

    #: List of filenames that should be included when dropping
    files: typing.Optional[typing.List[str]] = None

    def to_json(self):
        payload = dict()
        payload['items'] = [item.to_json() for item in self.items]
        payload['dragOperationsMask'] = self.drag_operations_mask
        if self.files is not None:
            payload['files'] = list(self.files)
        return payload

    @classmethod
    def from_json(cls, json):
        return cls(
            items=[DragDataItem.from_json(entry) for entry in json['items']],
            drag_operations_mask=int(json['dragOperationsMask']),
            files=[str(name) for name in json['files']] if 'files' in json else None,
        )
|
||||
|
||||
|
||||
def dispatch_drag_event(
        type_: str,
        x: float,
        y: float,
        data: DragData,
        modifiers: typing.Optional[int] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Dispatches a drag event into the page.

    **EXPERIMENTAL**

    :param type_: Type of the drag event.
    :param x: X coordinate of the event relative to the main frame's viewport in CSS pixels.
    :param y: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to the top of the viewport and Y increases as it proceeds towards the bottom of the viewport.
    :param data:
    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
    '''
    request: T_JSON_DICT = dict()
    request['type'] = type_
    request['x'] = x
    request['y'] = y
    request['data'] = data.to_json()
    # Omitting 'modifiers' lets the backend apply its documented default (0).
    if modifiers is not None:
        request['modifiers'] = modifiers
    yield {
        'method': 'Input.dispatchDragEvent',
        'params': request,
    }
|
||||
|
||||
|
||||
def dispatch_key_event(
        type_: str,
        modifiers: typing.Optional[int] = None,
        timestamp: typing.Optional[TimeSinceEpoch] = None,
        text: typing.Optional[str] = None,
        unmodified_text: typing.Optional[str] = None,
        key_identifier: typing.Optional[str] = None,
        code: typing.Optional[str] = None,
        key: typing.Optional[str] = None,
        windows_virtual_key_code: typing.Optional[int] = None,
        native_virtual_key_code: typing.Optional[int] = None,
        auto_repeat: typing.Optional[bool] = None,
        is_keypad: typing.Optional[bool] = None,
        is_system_key: typing.Optional[bool] = None,
        location: typing.Optional[int] = None,
        commands: typing.Optional[typing.List[str]] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Dispatches a key event to the page.

    :param type_: Type of the key event.
    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
    :param timestamp: *(Optional)* Time at which the event occurred.
    :param text: *(Optional)* Text as generated by processing a virtual key code with a keyboard layout. Not needed for ``keyUp`` and ``rawKeyDown`` events (default: "").
    :param unmodified_text: *(Optional)* Text that would have been generated by the keyboard if no modifiers were pressed (except for shift). Useful for shortcut (accelerator) key handling (default: "").
    :param key_identifier: *(Optional)* Unique key identifier (e.g., 'U+0041') (default: "").
    :param code: *(Optional)* Unique DOM defined string value for each physical key (e.g., 'KeyA') (default: "").
    :param key: *(Optional)* Unique DOM defined string value describing the meaning of the key in the context of active modifiers, keyboard layout, etc (e.g., 'AltGr') (default: "").
    :param windows_virtual_key_code: *(Optional)* Windows virtual key code (default: 0).
    :param native_virtual_key_code: *(Optional)* Native virtual key code (default: 0).
    :param auto_repeat: *(Optional)* Whether the event was generated from auto repeat (default: false).
    :param is_keypad: *(Optional)* Whether the event was generated from the keypad (default: false).
    :param is_system_key: *(Optional)* Whether the event was a system key event (default: false).
    :param location: *(Optional)* Whether the event was from the left or right side of the keyboard. 1=Left, 2=Right (default: 0).
    :param commands: **(EXPERIMENTAL)** *(Optional)* Editing commands to send with the key event (e.g., 'selectAll') (default: []). These are related to but not equal the command names used in ``document.execCommand`` and NSStandardKeyBindingResponding. See https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/core/editing/commands/editor_command_names.h for valid command names.
    '''
    payload: T_JSON_DICT = {'type': type_}
    # ``timestamp`` is the only optional that needs ``to_json`` serialization.
    if timestamp is not None:
        payload['timestamp'] = timestamp.to_json()
    # All remaining optionals are plain JSON scalars; send only those supplied.
    # (Loop variable is ``key_name`` because ``key`` is already a parameter.)
    for key_name, value in (
        ('modifiers', modifiers),
        ('text', text),
        ('unmodifiedText', unmodified_text),
        ('keyIdentifier', key_identifier),
        ('code', code),
        ('key', key),
        ('windowsVirtualKeyCode', windows_virtual_key_code),
        ('nativeVirtualKeyCode', native_virtual_key_code),
        ('autoRepeat', auto_repeat),
        ('isKeypad', is_keypad),
        ('isSystemKey', is_system_key),
        ('location', location),
    ):
        if value is not None:
            payload[key_name] = value
    if commands is not None:
        # Defensive copy so later mutation by the caller cannot change the payload.
        payload['commands'] = list(commands)
    yield {
        'method': 'Input.dispatchKeyEvent',
        'params': payload,
    }
|
||||
|
||||
|
||||
def insert_text(
        text: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    This method emulates inserting text that doesn't come from a key press,
    for example an emoji keyboard or an IME.

    **EXPERIMENTAL**

    :param text: The text to insert.
    '''
    # Single required parameter; no optionals for this command.
    yield {
        'method': 'Input.insertText',
        'params': {'text': text},
    }
|
||||
|
||||
|
||||
def ime_set_composition(
        text: str,
        selection_start: int,
        selection_end: int,
        replacement_start: typing.Optional[int] = None,
        replacement_end: typing.Optional[int] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    This method sets the current candidate text for IME.
    Use imeCommitComposition to commit the final text.
    Use imeSetComposition with empty string as text to cancel composition.

    **EXPERIMENTAL**

    :param text: The text to insert
    :param selection_start: selection start
    :param selection_end: selection end
    :param replacement_start: *(Optional)* replacement start
    :param replacement_end: *(Optional)* replacement end
    '''
    payload: T_JSON_DICT = {
        'text': text,
        'selectionStart': selection_start,
        'selectionEnd': selection_end,
    }
    # Replacement range is only transmitted when explicitly supplied.
    if replacement_start is not None:
        payload['replacementStart'] = replacement_start
    if replacement_end is not None:
        payload['replacementEnd'] = replacement_end
    yield {
        'method': 'Input.imeSetComposition',
        'params': payload,
    }
|
||||
|
||||
|
||||
def dispatch_mouse_event(
        type_: str,
        x: float,
        y: float,
        modifiers: typing.Optional[int] = None,
        timestamp: typing.Optional[TimeSinceEpoch] = None,
        button: typing.Optional[MouseButton] = None,
        buttons: typing.Optional[int] = None,
        click_count: typing.Optional[int] = None,
        force: typing.Optional[float] = None,
        tangential_pressure: typing.Optional[float] = None,
        tilt_x: typing.Optional[float] = None,
        tilt_y: typing.Optional[float] = None,
        twist: typing.Optional[int] = None,
        delta_x: typing.Optional[float] = None,
        delta_y: typing.Optional[float] = None,
        pointer_type: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Dispatches a mouse event to the page.

    :param type_: Type of the mouse event.
    :param x: X coordinate of the event relative to the main frame's viewport in CSS pixels.
    :param y: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to the top of the viewport and Y increases as it proceeds towards the bottom of the viewport.
    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
    :param timestamp: *(Optional)* Time at which the event occurred.
    :param button: *(Optional)* Mouse button (default: "none").
    :param buttons: *(Optional)* A number indicating which buttons are pressed on the mouse when a mouse event is triggered. Left=1, Right=2, Middle=4, Back=8, Forward=16, None=0.
    :param click_count: *(Optional)* Number of times the mouse button was clicked (default: 0).
    :param force: **(EXPERIMENTAL)** *(Optional)* The normalized pressure, which has a range of [0,1] (default: 0).
    :param tangential_pressure: **(EXPERIMENTAL)** *(Optional)* The normalized tangential pressure, which has a range of [-1,1] (default: 0).
    :param tilt_x: *(Optional)* The plane angle between the Y-Z plane and the plane containing both the stylus axis and the Y axis, in degrees of the range [-90,90], a positive tiltX is to the right (default: 0).
    :param tilt_y: *(Optional)* The plane angle between the X-Z plane and the plane containing both the stylus axis and the X axis, in degrees of the range [-90,90], a positive tiltY is towards the user (default: 0).
    :param twist: **(EXPERIMENTAL)** *(Optional)* The clockwise rotation of a pen stylus around its own major axis, in degrees in the range [0,359] (default: 0).
    :param delta_x: *(Optional)* X delta in CSS pixels for mouse wheel event (default: 0).
    :param delta_y: *(Optional)* Y delta in CSS pixels for mouse wheel event (default: 0).
    :param pointer_type: *(Optional)* Pointer type (default: "mouse").
    '''
    payload: T_JSON_DICT = {'type': type_, 'x': x, 'y': y}
    if modifiers is not None:
        payload['modifiers'] = modifiers
    # These two optionals carry CDP types that serialize through ``to_json``.
    if timestamp is not None:
        payload['timestamp'] = timestamp.to_json()
    if button is not None:
        payload['button'] = button.to_json()
    # Remaining optionals are plain JSON scalars; send only those supplied.
    for key, value in (
        ('buttons', buttons),
        ('clickCount', click_count),
        ('force', force),
        ('tangentialPressure', tangential_pressure),
        ('tiltX', tilt_x),
        ('tiltY', tilt_y),
        ('twist', twist),
        ('deltaX', delta_x),
        ('deltaY', delta_y),
        ('pointerType', pointer_type),
    ):
        if value is not None:
            payload[key] = value
    yield {
        'method': 'Input.dispatchMouseEvent',
        'params': payload,
    }
|
||||
|
||||
|
||||
def dispatch_touch_event(
        type_: str,
        touch_points: typing.List[TouchPoint],
        modifiers: typing.Optional[int] = None,
        timestamp: typing.Optional[TimeSinceEpoch] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Dispatches a touch event to the page.

    :param type_: Type of the touch event. TouchEnd and TouchCancel must not contain any touch points, while TouchStart and TouchMove must contains at least one.
    :param touch_points: Active touch points on the touch device. One event per any changed point (compared to previous touch event in a sequence) is generated, emulating pressing/moving/releasing points one by one.
    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
    :param timestamp: *(Optional)* Time at which the event occurred.
    '''
    # Each touch point is a CDP type and serializes through ``to_json``.
    payload: T_JSON_DICT = {
        'type': type_,
        'touchPoints': [point.to_json() for point in touch_points],
    }
    if modifiers is not None:
        payload['modifiers'] = modifiers
    if timestamp is not None:
        payload['timestamp'] = timestamp.to_json()
    yield {
        'method': 'Input.dispatchTouchEvent',
        'params': payload,
    }
|
||||
|
||||
|
||||
def cancel_dragging() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Cancels any active dragging in the page.
    '''
    # Parameterless command; yields the request and discards the empty reply.
    yield {
        'method': 'Input.cancelDragging',
    }
|
||||
|
||||
|
||||
def emulate_touch_from_mouse_event(
        type_: str,
        x: int,
        y: int,
        button: MouseButton,
        timestamp: typing.Optional[TimeSinceEpoch] = None,
        delta_x: typing.Optional[float] = None,
        delta_y: typing.Optional[float] = None,
        modifiers: typing.Optional[int] = None,
        click_count: typing.Optional[int] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Emulates touch event from the mouse event parameters.

    **EXPERIMENTAL**

    :param type_: Type of the mouse event.
    :param x: X coordinate of the mouse pointer in DIP.
    :param y: Y coordinate of the mouse pointer in DIP.
    :param button: Mouse button. Only "none", "left", "right" are supported.
    :param timestamp: *(Optional)* Time at which the event occurred (default: current time).
    :param delta_x: *(Optional)* X delta in DIP for mouse wheel event (default: 0).
    :param delta_y: *(Optional)* Y delta in DIP for mouse wheel event (default: 0).
    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
    :param click_count: *(Optional)* Number of times the mouse button was clicked (default: 0).
    '''
    # ``button`` is required and serializes through ``to_json``.
    payload: T_JSON_DICT = {
        'type': type_,
        'x': x,
        'y': y,
        'button': button.to_json(),
    }
    if timestamp is not None:
        payload['timestamp'] = timestamp.to_json()
    # Scalar optionals: include only the supplied ones.
    for key, value in (
        ('deltaX', delta_x),
        ('deltaY', delta_y),
        ('modifiers', modifiers),
        ('clickCount', click_count),
    ):
        if value is not None:
            payload[key] = value
    yield {
        'method': 'Input.emulateTouchFromMouseEvent',
        'params': payload,
    }
|
||||
|
||||
|
||||
def set_ignore_input_events(
        ignore: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Ignores input events (useful while auditing page).

    :param ignore: Ignores input events processing when set to true.
    '''
    # Single required flag; no optional parameters for this command.
    yield {
        'method': 'Input.setIgnoreInputEvents',
        'params': {'ignore': ignore},
    }
|
||||
|
||||
|
||||
def set_intercept_drags(
        enabled: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Prevents default drag and drop behavior and instead emits ``Input.dragIntercepted`` events.
    Drag and drop behavior can be directly controlled via ``Input.dispatchDragEvent``.

    **EXPERIMENTAL**

    :param enabled:
    '''
    # Single required flag; no optional parameters for this command.
    yield {
        'method': 'Input.setInterceptDrags',
        'params': {'enabled': enabled},
    }
|
||||
|
||||
|
||||
def synthesize_pinch_gesture(
        x: float,
        y: float,
        scale_factor: float,
        relative_speed: typing.Optional[int] = None,
        gesture_source_type: typing.Optional[GestureSourceType] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Synthesizes a pinch gesture over a time period by issuing appropriate touch events.

    **EXPERIMENTAL**

    :param x: X coordinate of the start of the gesture in CSS pixels.
    :param y: Y coordinate of the start of the gesture in CSS pixels.
    :param scale_factor: Relative scale factor after zooming (>1.0 zooms in, <1.0 zooms out).
    :param relative_speed: *(Optional)* Relative pointer speed in pixels per second (default: 800).
    :param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
    '''
    payload: T_JSON_DICT = {
        'x': x,
        'y': y,
        'scaleFactor': scale_factor,
    }
    if relative_speed is not None:
        payload['relativeSpeed'] = relative_speed
    # Gesture source type is a CDP enum and serializes through ``to_json``.
    if gesture_source_type is not None:
        payload['gestureSourceType'] = gesture_source_type.to_json()
    yield {
        'method': 'Input.synthesizePinchGesture',
        'params': payload,
    }
|
||||
|
||||
|
||||
def synthesize_scroll_gesture(
        x: float,
        y: float,
        x_distance: typing.Optional[float] = None,
        y_distance: typing.Optional[float] = None,
        x_overscroll: typing.Optional[float] = None,
        y_overscroll: typing.Optional[float] = None,
        prevent_fling: typing.Optional[bool] = None,
        speed: typing.Optional[int] = None,
        gesture_source_type: typing.Optional[GestureSourceType] = None,
        repeat_count: typing.Optional[int] = None,
        repeat_delay_ms: typing.Optional[int] = None,
        interaction_marker_name: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Synthesizes a scroll gesture over a time period by issuing appropriate touch events.

    **EXPERIMENTAL**

    :param x: X coordinate of the start of the gesture in CSS pixels.
    :param y: Y coordinate of the start of the gesture in CSS pixels.
    :param x_distance: *(Optional)* The distance to scroll along the X axis (positive to scroll left).
    :param y_distance: *(Optional)* The distance to scroll along the Y axis (positive to scroll up).
    :param x_overscroll: *(Optional)* The number of additional pixels to scroll back along the X axis, in addition to the given distance.
    :param y_overscroll: *(Optional)* The number of additional pixels to scroll back along the Y axis, in addition to the given distance.
    :param prevent_fling: *(Optional)* Prevent fling (default: true).
    :param speed: *(Optional)* Swipe speed in pixels per second (default: 800).
    :param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
    :param repeat_count: *(Optional)* The number of times to repeat the gesture (default: 0).
    :param repeat_delay_ms: *(Optional)* The number of milliseconds delay between each repeat. (default: 250).
    :param interaction_marker_name: *(Optional)* The name of the interaction markers to generate, if not empty (default: "").
    '''
    payload: T_JSON_DICT = {'x': x, 'y': y}
    # Plain-scalar optionals are appended in the protocol's declared order;
    # ``gestureSourceType`` is handled separately because it needs ``to_json``.
    for key, value in (
        ('xDistance', x_distance),
        ('yDistance', y_distance),
        ('xOverscroll', x_overscroll),
        ('yOverscroll', y_overscroll),
        ('preventFling', prevent_fling),
        ('speed', speed),
    ):
        if value is not None:
            payload[key] = value
    if gesture_source_type is not None:
        payload['gestureSourceType'] = gesture_source_type.to_json()
    for key, value in (
        ('repeatCount', repeat_count),
        ('repeatDelayMs', repeat_delay_ms),
        ('interactionMarkerName', interaction_marker_name),
    ):
        if value is not None:
            payload[key] = value
    yield {
        'method': 'Input.synthesizeScrollGesture',
        'params': payload,
    }
|
||||
|
||||
|
||||
def synthesize_tap_gesture(
        x: float,
        y: float,
        duration: typing.Optional[int] = None,
        tap_count: typing.Optional[int] = None,
        gesture_source_type: typing.Optional[GestureSourceType] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Synthesizes a tap gesture over a time period by issuing appropriate touch events.

    **EXPERIMENTAL**

    :param x: X coordinate of the start of the gesture in CSS pixels.
    :param y: Y coordinate of the start of the gesture in CSS pixels.
    :param duration: *(Optional)* Duration between touchdown and touchup events in ms (default: 50).
    :param tap_count: *(Optional)* Number of times to perform the tap (e.g. 2 for double tap, default: 1).
    :param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
    '''
    payload: T_JSON_DICT = {'x': x, 'y': y}
    if duration is not None:
        payload['duration'] = duration
    if tap_count is not None:
        payload['tapCount'] = tap_count
    # Gesture source type is a CDP enum and serializes through ``to_json``.
    if gesture_source_type is not None:
        payload['gestureSourceType'] = gesture_source_type.to_json()
    yield {
        'method': 'Input.synthesizeTapGesture',
        'params': payload,
    }
|
||||
|
||||
|
||||
@event_class('Input.dragIntercepted')
@dataclass
class DragIntercepted:
    '''
    **EXPERIMENTAL**

    Emitted only when ``Input.setInterceptDrags`` is enabled. Use this data with ``Input.dispatchDragEvent`` to
    restore normal drag and drop behavior.
    '''
    #: Payload of the intercepted drag operation (items, allowed operations,
    #: and optional file list).
    data: DragData

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DragIntercepted:
        # Deserialize the raw CDP event payload into this typed wrapper.
        return cls(
            data=DragData.from_json(json['data'])
        )
|
@ -0,0 +1,76 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Inspector (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables inspector domain notifications.
    '''
    # Parameterless command; yields the request and discards the empty reply.
    yield {
        'method': 'Inspector.disable',
    }
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables inspector domain notifications.
    '''
    # Parameterless command; yields the request and discards the empty reply.
    yield {
        'method': 'Inspector.enable',
    }
|
||||
|
||||
|
||||
@event_class('Inspector.detached')
@dataclass
class Detached:
    '''
    Fired when remote debugging connection is about to be terminated. Contains detach reason.
    '''
    #: The reason why connection has been terminated.
    reason: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> Detached:
        # Build the typed event object from the raw CDP payload.
        return cls(
            reason=str(json['reason'])
        )
|
||||
|
||||
|
||||
@event_class('Inspector.targetCrashed')
@dataclass
class TargetCrashed:
    '''
    Fired when debugging target has crashed
    '''


    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetCrashed:
        # This event carries no payload fields; the raw JSON is ignored.
        return cls(

        )
|
||||
|
||||
|
||||
@event_class('Inspector.targetReloadedAfterCrash')
@dataclass
class TargetReloadedAfterCrash:
    '''
    Fired when debugging target has reloaded after crash
    '''


    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetReloadedAfterCrash:
        # This event carries no payload fields; the raw JSON is ignored.
        return cls(

        )
|
99
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/io.py
Executable file
99
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/io.py
Executable file
@ -0,0 +1,99 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: IO
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import runtime
|
||||
|
||||
|
||||
class StreamHandle(str):
    '''
    This is either obtained from another method or specified as ``blob:<uuid>`` where
    ``<uuid>`` is an UUID of a Blob.
    '''
    def to_json(self) -> str:
        # A stream handle serializes as its plain string value.
        return self

    @classmethod
    def from_json(cls, json: str) -> StreamHandle:
        return cls(json)

    def __repr__(self):
        return f'StreamHandle({super().__repr__()})'
|
||||
|
||||
|
||||
def close(
        handle: StreamHandle
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Close the stream, discard any temporary backing storage.

    :param handle: Handle of the stream to close.
    '''
    # The handle is a CDP type and serializes through ``to_json``.
    yield {
        'method': 'IO.close',
        'params': {'handle': handle.to_json()},
    }
|
||||
|
||||
|
||||
def read(
        handle: StreamHandle,
        offset: typing.Optional[int] = None,
        size: typing.Optional[int] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.Optional[bool], str, bool]]:
    '''
    Read a chunk of the stream

    :param handle: Handle of the stream to read.
    :param offset: *(Optional)* Seek to the specified offset before reading (if not specified, proceed with offset following the last read). Some types of streams may only support sequential reads.
    :param size: *(Optional)* Maximum number of bytes to read (left upon the agent discretion if not specified).
    :returns: A tuple with the following items:

        0. **base64Encoded** - *(Optional)* Set if the data is base64-encoded
        1. **data** - Data that were read.
        2. **eof** - Set if the end-of-file condition occurred while reading.
    '''
    payload: T_JSON_DICT = {'handle': handle.to_json()}
    if offset is not None:
        payload['offset'] = offset
    if size is not None:
        payload['size'] = size
    resp = yield {
        'method': 'IO.read',
        'params': payload,
    }
    # ``base64Encoded`` is optional in the response; the other two are required.
    base64_encoded = bool(resp['base64Encoded']) if 'base64Encoded' in resp else None
    return (
        base64_encoded,
        str(resp['data']),
        bool(resp['eof'])
    )
|
||||
|
||||
|
||||
def resolve_blob(
        object_id: runtime.RemoteObjectId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,str]:
    '''
    Return UUID of Blob object specified by a remote object id.

    :param object_id: Object id of a Blob object wrapper.
    :returns: UUID of the specified Blob.
    '''
    # The object id is a CDP type and serializes through ``to_json``.
    resp = yield {
        'method': 'IO.resolveBlob',
        'params': {'objectId': object_id.to_json()},
    }
    return str(resp['uuid'])
|
@ -0,0 +1,462 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: LayerTree (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
|
||||
|
||||
class LayerId(str):
    '''
    Unique Layer identifier.
    '''
    def to_json(self) -> str:
        # Layer ids serialize as their plain string value.
        return self

    @classmethod
    def from_json(cls, json: str) -> LayerId:
        return cls(json)

    def __repr__(self):
        return f'LayerId({super().__repr__()})'
|
||||
|
||||
|
||||
class SnapshotId(str):
    '''
    Unique snapshot identifier.
    '''
    def to_json(self) -> str:
        # Snapshot ids serialize as their plain string value.
        return self

    @classmethod
    def from_json(cls, json: str) -> SnapshotId:
        return cls(json)

    def __repr__(self):
        return f'SnapshotId({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class ScrollRect:
    '''
    Rectangle where scrolling happens on the main thread.
    '''
    #: Rectangle itself.
    rect: dom.Rect

    #: Reason for rectangle to force scrolling on the main thread
    type_: str

    def to_json(self):
        # ``type_`` avoids shadowing the builtin; the wire key is ``type``.
        return {
            'rect': self.rect.to_json(),
            'type': self.type_,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            dom.Rect.from_json(json['rect']),
            str(json['type']),
        )
|
||||
|
||||
|
||||
@dataclass
class StickyPositionConstraint:
    '''
    Sticky position constraints.
    '''
    #: Layout rectangle of the sticky element before being shifted
    sticky_box_rect: dom.Rect

    #: Layout rectangle of the containing block of the sticky element
    containing_block_rect: dom.Rect

    #: The nearest sticky layer that shifts the sticky box
    nearest_layer_shifting_sticky_box: typing.Optional[LayerId] = None

    #: The nearest sticky layer that shifts the containing block
    nearest_layer_shifting_containing_block: typing.Optional[LayerId] = None

    def to_json(self):
        result = {
            'stickyBoxRect': self.sticky_box_rect.to_json(),
            'containingBlockRect': self.containing_block_rect.to_json(),
        }
        # Optional layer references are emitted only when present.
        if self.nearest_layer_shifting_sticky_box is not None:
            result['nearestLayerShiftingStickyBox'] = self.nearest_layer_shifting_sticky_box.to_json()
        if self.nearest_layer_shifting_containing_block is not None:
            result['nearestLayerShiftingContainingBlock'] = self.nearest_layer_shifting_containing_block.to_json()
        return result

    @classmethod
    def from_json(cls, json):
        # Membership tests (not .get) preserve the original semantics for
        # keys that are present but null.
        kwargs = dict(
            sticky_box_rect=dom.Rect.from_json(json['stickyBoxRect']),
            containing_block_rect=dom.Rect.from_json(json['containingBlockRect']),
        )
        if 'nearestLayerShiftingStickyBox' in json:
            kwargs['nearest_layer_shifting_sticky_box'] = LayerId.from_json(json['nearestLayerShiftingStickyBox'])
        if 'nearestLayerShiftingContainingBlock' in json:
            kwargs['nearest_layer_shifting_containing_block'] = LayerId.from_json(json['nearestLayerShiftingContainingBlock'])
        return cls(**kwargs)
|
||||
|
||||
|
||||
@dataclass
class PictureTile:
    '''
    Serialized fragment of layer picture along with its offset within the layer.
    '''
    #: Offset from owning layer left boundary
    x: float

    #: Offset from owning layer top boundary
    y: float

    #: Base64-encoded snapshot data.
    picture: str

    def to_json(self):
        # All three fields are required scalars; no optional handling needed.
        return {
            'x': self.x,
            'y': self.y,
            'picture': self.picture,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            float(json['x']),
            float(json['y']),
            str(json['picture']),
        )
|
||||
|
||||
|
||||
@dataclass
class Layer:
    '''
    Information about a compositing layer.
    '''
    #: The unique id for this layer.
    layer_id: LayerId

    #: Offset from parent layer, X coordinate.
    offset_x: float

    #: Offset from parent layer, Y coordinate.
    offset_y: float

    #: Layer width.
    width: float

    #: Layer height.
    height: float

    #: Indicates how many time this layer has painted.
    paint_count: int

    #: Indicates whether this layer hosts any content, rather than being used for
    #: transform/scrolling purposes only.
    draws_content: bool

    #: The id of parent (not present for root).
    parent_layer_id: typing.Optional[LayerId] = None

    #: The backend id for the node associated with this layer.
    backend_node_id: typing.Optional[dom.BackendNodeId] = None

    #: Transformation matrix for layer, default is identity matrix
    transform: typing.Optional[typing.List[float]] = None

    #: Transform anchor point X, absent if no transform specified
    anchor_x: typing.Optional[float] = None

    #: Transform anchor point Y, absent if no transform specified
    anchor_y: typing.Optional[float] = None

    #: Transform anchor point Z, absent if no transform specified
    anchor_z: typing.Optional[float] = None

    #: Set if layer is not visible.
    invisible: typing.Optional[bool] = None

    #: Rectangles scrolling on main thread only.
    scroll_rects: typing.Optional[typing.List[ScrollRect]] = None

    #: Sticky position constraint information
    sticky_position_constraint: typing.Optional[StickyPositionConstraint] = None

    def to_json(self):
        # Required properties first, in protocol order.
        json = {
            'layerId': self.layer_id.to_json(),
            'offsetX': self.offset_x,
            'offsetY': self.offset_y,
            'width': self.width,
            'height': self.height,
            'paintCount': self.paint_count,
            'drawsContent': self.draws_content,
        }
        # Optional properties are emitted only when present.
        if self.parent_layer_id is not None:
            json['parentLayerId'] = self.parent_layer_id.to_json()
        if self.backend_node_id is not None:
            json['backendNodeId'] = self.backend_node_id.to_json()
        if self.transform is not None:
            json['transform'] = list(self.transform)
        if self.anchor_x is not None:
            json['anchorX'] = self.anchor_x
        if self.anchor_y is not None:
            json['anchorY'] = self.anchor_y
        if self.anchor_z is not None:
            json['anchorZ'] = self.anchor_z
        if self.invisible is not None:
            json['invisible'] = self.invisible
        if self.scroll_rects is not None:
            json['scrollRects'] = [rect.to_json() for rect in self.scroll_rects]
        if self.sticky_position_constraint is not None:
            json['stickyPositionConstraint'] = self.sticky_position_constraint.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        # Optional keys convert only when present; absent keys become None.
        def opt(key, convert):
            return convert(json[key]) if key in json else None
        return cls(
            layer_id=LayerId.from_json(json['layerId']),
            offset_x=float(json['offsetX']),
            offset_y=float(json['offsetY']),
            width=float(json['width']),
            height=float(json['height']),
            paint_count=int(json['paintCount']),
            draws_content=bool(json['drawsContent']),
            parent_layer_id=opt('parentLayerId', LayerId.from_json),
            backend_node_id=opt('backendNodeId', dom.BackendNodeId.from_json),
            transform=opt('transform', lambda v: [float(i) for i in v]),
            anchor_x=opt('anchorX', float),
            anchor_y=opt('anchorY', float),
            anchor_z=opt('anchorZ', float),
            invisible=opt('invisible', bool),
            scroll_rects=opt('scrollRects', lambda v: [ScrollRect.from_json(i) for i in v]),
            sticky_position_constraint=opt('stickyPositionConstraint', StickyPositionConstraint.from_json),
        )
|
||||
|
||||
|
||||
class PaintProfile(list):
    '''
    Array of timings, one per paint step.
    '''
    def to_json(self) -> typing.List[float]:
        # The profile is itself the JSON payload (a plain list of floats).
        return self

    @classmethod
    def from_json(cls, json: typing.List[float]) -> PaintProfile:
        return cls(json)

    def __repr__(self):
        return 'PaintProfile({})'.format(super().__repr__())
|
||||
|
||||
|
||||
def compositing_reasons(
        layer_id: LayerId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[str], typing.List[str]]]:
    '''
    Provides the reasons why the given layer was composited.

    :param layer_id: The id of the layer for which we want to get the reasons it was composited.
    :returns: A tuple with the following items:

        0. **compositingReasons** - A list of strings specifying reasons for the given layer to become composited.
        1. **compositingReasonIds** - A list of strings specifying reason IDs for the given layer to become composited.
    '''
    response = yield {
        'method': 'LayerTree.compositingReasons',
        'params': {'layerId': layer_id.to_json()},
    }
    return (
        [str(reason) for reason in response['compositingReasons']],
        [str(reason_id) for reason_id in response['compositingReasonIds']]
    )
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables compositing tree inspection.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'LayerTree.disable',
    }
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables compositing tree inspection.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'LayerTree.enable',
    }
|
||||
|
||||
|
||||
def load_snapshot(
        tiles: typing.List[PictureTile]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SnapshotId]:
    '''
    Returns the snapshot identifier.

    :param tiles: An array of tiles composing the snapshot.
    :returns: The id of the snapshot.
    '''
    response = yield {
        'method': 'LayerTree.loadSnapshot',
        'params': {'tiles': [tile.to_json() for tile in tiles]},
    }
    return SnapshotId.from_json(response['snapshotId'])
|
||||
|
||||
|
||||
def make_snapshot(
        layer_id: LayerId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SnapshotId]:
    '''
    Returns the layer snapshot identifier.

    :param layer_id: The id of the layer.
    :returns: The id of the layer snapshot.
    '''
    response = yield {
        'method': 'LayerTree.makeSnapshot',
        'params': {'layerId': layer_id.to_json()},
    }
    return SnapshotId.from_json(response['snapshotId'])
|
||||
|
||||
|
||||
def profile_snapshot(
        snapshot_id: SnapshotId,
        min_repeat_count: typing.Optional[int] = None,
        min_duration: typing.Optional[float] = None,
        clip_rect: typing.Optional[dom.Rect] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[PaintProfile]]:
    '''
    :param snapshot_id: The id of the layer snapshot.
    :param min_repeat_count: *(Optional)* The maximum number of times to replay the snapshot (1, if not specified).
    :param min_duration: *(Optional)* The minimum duration (in seconds) to replay the snapshot.
    :param clip_rect: *(Optional)* The clip rectangle to apply when replaying the snapshot.
    :returns: The array of paint profiles, one per run.
    '''
    # Only non-None optionals are sent on the wire.
    params: T_JSON_DICT = {'snapshotId': snapshot_id.to_json()}
    if min_repeat_count is not None:
        params['minRepeatCount'] = min_repeat_count
    if min_duration is not None:
        params['minDuration'] = min_duration
    if clip_rect is not None:
        params['clipRect'] = clip_rect.to_json()
    response = yield {
        'method': 'LayerTree.profileSnapshot',
        'params': params,
    }
    return [PaintProfile.from_json(timings) for timings in response['timings']]
|
||||
|
||||
|
||||
def release_snapshot(
        snapshot_id: SnapshotId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Releases layer snapshot captured by the back-end.

    :param snapshot_id: The id of the layer snapshot.
    '''
    # No result; the browser's reply is discarded.
    yield {
        'method': 'LayerTree.releaseSnapshot',
        'params': {'snapshotId': snapshot_id.to_json()},
    }
|
||||
|
||||
|
||||
def replay_snapshot(
        snapshot_id: SnapshotId,
        from_step: typing.Optional[int] = None,
        to_step: typing.Optional[int] = None,
        scale: typing.Optional[float] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,str]:
    '''
    Replays the layer snapshot and returns the resulting bitmap.

    :param snapshot_id: The id of the layer snapshot.
    :param from_step: *(Optional)* The first step to replay from (replay from the very start if not specified).
    :param to_step: *(Optional)* The last step to replay to (replay till the end if not specified).
    :param scale: *(Optional)* The scale to apply while replaying (defaults to 1).
    :returns: A data: URL for resulting image.
    '''
    # Only non-None optionals are sent on the wire.
    params: T_JSON_DICT = {'snapshotId': snapshot_id.to_json()}
    if from_step is not None:
        params['fromStep'] = from_step
    if to_step is not None:
        params['toStep'] = to_step
    if scale is not None:
        params['scale'] = scale
    response = yield {
        'method': 'LayerTree.replaySnapshot',
        'params': params,
    }
    return str(response['dataURL'])
|
||||
|
||||
|
||||
def snapshot_command_log(
        snapshot_id: SnapshotId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[dict]]:
    '''
    Replays the layer snapshot and returns canvas log.

    :param snapshot_id: The id of the layer snapshot.
    :returns: The array of canvas function calls.
    '''
    response = yield {
        'method': 'LayerTree.snapshotCommandLog',
        'params': {'snapshotId': snapshot_id.to_json()},
    }
    return [dict(entry) for entry in response['commandLog']]
|
||||
|
||||
|
||||
@event_class('LayerTree.layerPainted')
@dataclass
class LayerPainted:
    '''
    Event payload for ``LayerTree.layerPainted``.
    '''
    #: The id of the painted layer.
    layer_id: LayerId
    #: Clip rectangle.
    clip: dom.Rect

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> LayerPainted:
        return cls(
            LayerId.from_json(json['layerId']),
            dom.Rect.from_json(json['clip'])
        )
|
||||
|
||||
|
||||
@event_class('LayerTree.layerTreeDidChange')
@dataclass
class LayerTreeDidChange:
    '''
    Event payload for ``LayerTree.layerTreeDidChange``.
    '''
    #: Layer tree, absent if not in the compositing mode.
    layers: typing.Optional[typing.List[Layer]]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> LayerTreeDidChange:
        if 'layers' not in json:
            return cls(layers=None)
        return cls(layers=[Layer.from_json(layer) for layer in json['layers']])
|
188
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/log.py
Executable file
188
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/log.py
Executable file
@ -0,0 +1,188 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Log
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
class LogEntry:
    '''
    Log entry.
    '''
    #: Log entry source.
    source: str

    #: Log entry severity.
    level: str

    #: Logged text.
    text: str

    #: Timestamp when this entry was added.
    timestamp: runtime.Timestamp

    category: typing.Optional[str] = None

    #: URL of the resource if known.
    url: typing.Optional[str] = None

    #: Line number in the resource.
    line_number: typing.Optional[int] = None

    #: JavaScript stack trace.
    stack_trace: typing.Optional[runtime.StackTrace] = None

    #: Identifier of the network request associated with this entry.
    network_request_id: typing.Optional[network.RequestId] = None

    #: Identifier of the worker associated with this entry.
    worker_id: typing.Optional[str] = None

    #: Call arguments.
    args: typing.Optional[typing.List[runtime.RemoteObject]] = None

    def to_json(self):
        # Required properties first, in protocol order.
        json = {
            'source': self.source,
            'level': self.level,
            'text': self.text,
            'timestamp': self.timestamp.to_json(),
        }
        # Optional properties are emitted only when present.
        if self.category is not None:
            json['category'] = self.category
        if self.url is not None:
            json['url'] = self.url
        if self.line_number is not None:
            json['lineNumber'] = self.line_number
        if self.stack_trace is not None:
            json['stackTrace'] = self.stack_trace.to_json()
        if self.network_request_id is not None:
            json['networkRequestId'] = self.network_request_id.to_json()
        if self.worker_id is not None:
            json['workerId'] = self.worker_id
        if self.args is not None:
            json['args'] = [arg.to_json() for arg in self.args]
        return json

    @classmethod
    def from_json(cls, json):
        # Optional keys convert only when present; absent keys become None.
        def opt(key, convert):
            return convert(json[key]) if key in json else None
        return cls(
            source=str(json['source']),
            level=str(json['level']),
            text=str(json['text']),
            timestamp=runtime.Timestamp.from_json(json['timestamp']),
            category=opt('category', str),
            url=opt('url', str),
            line_number=opt('lineNumber', int),
            stack_trace=opt('stackTrace', runtime.StackTrace.from_json),
            network_request_id=opt('networkRequestId', network.RequestId.from_json),
            worker_id=opt('workerId', str),
            args=opt('args', lambda v: [runtime.RemoteObject.from_json(i) for i in v]),
        )
|
||||
|
||||
|
||||
@dataclass
class ViolationSetting:
    '''
    Violation configuration setting.
    '''
    #: Violation type.
    name: str

    #: Time threshold to trigger upon.
    threshold: float

    def to_json(self):
        return {
            'name': self.name,
            'threshold': self.threshold,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            threshold=float(json['threshold']),
        )
|
||||
|
||||
|
||||
def clear() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Clears the log.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Log.clear',
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables log domain, prevents further log entries from being reported to the client.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Log.disable',
    }
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables log domain, sends the entries collected so far to the client by means of the
    ``entryAdded`` notification.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Log.enable',
    }
|
||||
|
||||
|
||||
def start_violations_report(
        config: typing.List[ViolationSetting]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    start violation reporting.

    :param config: Configuration for violations.
    '''
    # No result; the browser's reply is discarded.
    yield {
        'method': 'Log.startViolationsReport',
        'params': {'config': [setting.to_json() for setting in config]},
    }
|
||||
|
||||
|
||||
def stop_violations_report() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Stop violation reporting.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Log.stopViolationsReport',
    }
|
||||
|
||||
|
||||
@event_class('Log.entryAdded')
@dataclass
class EntryAdded:
    '''
    Issued when new message was logged.
    '''
    #: The entry.
    entry: LogEntry

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> EntryAdded:
        return cls(LogEntry.from_json(json['entry']))
|
288
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/media.py
Executable file
288
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/media.py
Executable file
@ -0,0 +1,288 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Media (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class PlayerId(str):
    '''
    Players will get an ID that is unique within the agent context.
    '''
    def to_json(self) -> str:
        # The id is itself the JSON payload.
        return self

    @classmethod
    def from_json(cls, json: str) -> PlayerId:
        return cls(json)

    def __repr__(self):
        return 'PlayerId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class Timestamp(float):
    '''
    Media timestamp value (wrapped float).
    '''
    def to_json(self) -> float:
        # The timestamp is itself the JSON payload.
        return self

    @classmethod
    def from_json(cls, json: float) -> Timestamp:
        return cls(json)

    def __repr__(self):
        return 'Timestamp({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
class PlayerMessage:
    '''
    Have one type per entry in MediaLogRecord::Type
    Corresponds to kMessage
    '''
    #: Keep in sync with MediaLogMessageLevel
    #: We are currently keeping the message level 'error' separate from the
    #: PlayerError type because right now they represent different things,
    #: this one being a DVLOG(ERROR) style log message that gets printed
    #: based on what log level is selected in the UI, and the other is a
    #: representation of a media::PipelineStatus object. Soon however we're
    #: going to be moving away from using PipelineStatus for errors and
    #: introducing a new error type which should hopefully let us integrate
    #: the error log level into the PlayerError type.
    level: str

    message: str

    def to_json(self):
        return {
            'level': self.level,
            'message': self.message,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            level=str(json['level']),
            message=str(json['message']),
        )
|
||||
|
||||
|
||||
@dataclass
class PlayerProperty:
    '''
    Corresponds to kMediaPropertyChange
    '''
    name: str

    value: str

    def to_json(self):
        return {
            'name': self.name,
            'value': self.value,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            value=str(json['value']),
        )
|
||||
|
||||
|
||||
@dataclass
class PlayerEvent:
    '''
    Corresponds to kMediaEventTriggered
    '''
    timestamp: Timestamp

    value: str

    def to_json(self):
        return {
            'timestamp': self.timestamp.to_json(),
            'value': self.value,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            timestamp=Timestamp.from_json(json['timestamp']),
            value=str(json['value']),
        )
|
||||
|
||||
|
||||
@dataclass
class PlayerErrorSourceLocation:
    '''
    Represents logged source line numbers reported in an error.
    NOTE: file and line are from chromium c++ implementation code, not js.
    '''
    file: str

    line: int

    def to_json(self):
        return {
            'file': self.file,
            'line': self.line,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            file=str(json['file']),
            line=int(json['line']),
        )
|
||||
|
||||
|
||||
@dataclass
class PlayerError:
    '''
    Corresponds to kMediaError
    '''
    error_type: str

    #: Code is the numeric enum entry for a specific set of error codes, such
    #: as PipelineStatusCodes in media/base/pipeline_status.h
    code: int

    #: A trace of where this error was caused / where it passed through.
    stack: typing.List[PlayerErrorSourceLocation]

    #: Errors potentially have a root cause error, ie, a DecoderError might be
    #: caused by an WindowsError
    cause: typing.List[PlayerError]

    #: Extra data attached to an error, such as an HRESULT, Video Codec, etc.
    data: dict

    def to_json(self):
        return {
            'errorType': self.error_type,
            'code': self.code,
            'stack': [frame.to_json() for frame in self.stack],
            'cause': [err.to_json() for err in self.cause],
            'data': self.data,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            error_type=str(json['errorType']),
            code=int(json['code']),
            stack=[PlayerErrorSourceLocation.from_json(i) for i in json['stack']],
            cause=[PlayerError.from_json(i) for i in json['cause']],
            data=dict(json['data']),
        )
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables the Media domain
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Media.enable',
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables the Media domain.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Media.disable',
    }
|
||||
|
||||
|
||||
@event_class('Media.playerPropertiesChanged')
@dataclass
class PlayerPropertiesChanged:
    '''
    This can be called multiple times, and can be used to set / override /
    remove player properties. A null propValue indicates removal.
    '''
    player_id: PlayerId
    properties: typing.List[PlayerProperty]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> PlayerPropertiesChanged:
        return cls(
            PlayerId.from_json(json['playerId']),
            [PlayerProperty.from_json(prop) for prop in json['properties']]
        )
|
||||
|
||||
|
||||
@event_class('Media.playerEventsAdded')
@dataclass
class PlayerEventsAdded:
    '''
    Send events as a list, allowing them to be batched on the browser for less
    congestion. If batched, events must ALWAYS be in chronological order.
    '''
    player_id: PlayerId
    events: typing.List[PlayerEvent]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> PlayerEventsAdded:
        return cls(
            PlayerId.from_json(json['playerId']),
            [PlayerEvent.from_json(ev) for ev in json['events']]
        )
|
||||
|
||||
|
||||
@event_class('Media.playerMessagesLogged')
@dataclass
class PlayerMessagesLogged:
    '''
    Send a list of any messages that need to be delivered.
    '''
    player_id: PlayerId
    messages: typing.List[PlayerMessage]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> PlayerMessagesLogged:
        return cls(
            PlayerId.from_json(json['playerId']),
            [PlayerMessage.from_json(msg) for msg in json['messages']]
        )
|
||||
|
||||
|
||||
@event_class('Media.playerErrorsRaised')
@dataclass
class PlayerErrorsRaised:
    '''
    Send a list of any errors that need to be delivered.
    '''
    player_id: PlayerId
    errors: typing.List[PlayerError]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> PlayerErrorsRaised:
        return cls(
            PlayerId.from_json(json['playerId']),
            [PlayerError.from_json(err) for err in json['errors']]
        )
|
||||
|
||||
|
||||
@event_class('Media.playersCreated')
@dataclass
class PlayersCreated:
    '''
    Called whenever a player is created, or when a new agent joins and receives
    a list of active players. If an agent is restored, it will receive the full
    list of player ids and all events again.
    '''
    players: typing.List[PlayerId]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> PlayersCreated:
        return cls([PlayerId.from_json(pid) for pid in json['players']])
|
303
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/memory.py
Executable file
303
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/memory.py
Executable file
@ -0,0 +1,303 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Memory (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class PressureLevel(enum.Enum):
    '''
    Memory pressure level.
    '''
    MODERATE = "moderate"
    CRITICAL = "critical"

    def to_json(self):
        # Serialize as the wire string.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class SamplingProfileNode:
    '''
    Heap profile sample.
    '''
    #: Size of the sampled allocation.
    size: float

    #: Total bytes attributed to this sample.
    total: float

    #: Execution stack at the point of allocation.
    stack: typing.List[str]

    def to_json(self):
        return {
            'size': self.size,
            'total': self.total,
            'stack': list(self.stack),
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            size=float(json['size']),
            total=float(json['total']),
            stack=[str(frame) for frame in json['stack']],
        )
|
||||
|
||||
|
||||
@dataclass
class SamplingProfile:
    '''
    Array of heap profile samples.
    '''
    samples: typing.List[SamplingProfileNode]

    modules: typing.List[Module]

    def to_json(self):
        return {
            'samples': [sample.to_json() for sample in self.samples],
            'modules': [module.to_json() for module in self.modules],
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            samples=[SamplingProfileNode.from_json(i) for i in json['samples']],
            modules=[Module.from_json(i) for i in json['modules']],
        )
|
||||
|
||||
|
||||
@dataclass
class Module:
    '''
    Executable module information
    '''
    #: Name of the module.
    name: str

    #: UUID of the module.
    uuid: str

    #: Base address where the module is loaded into memory. Encoded as a decimal
    #: or hexadecimal (0x prefixed) string.
    base_address: str

    #: Size of the module in bytes.
    size: float

    def to_json(self):
        return {
            'name': self.name,
            'uuid': self.uuid,
            'baseAddress': self.base_address,
            'size': self.size,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            uuid=str(json['uuid']),
            base_address=str(json['baseAddress']),
            size=float(json['size']),
        )
|
||||
|
||||
|
||||
@dataclass
class DOMCounter:
    '''
    DOM object counter data.
    '''
    #: Object name. Note: object names should be presumed volatile and clients should not expect
    #: the returned names to be consistent across runs.
    name: str

    #: Object count.
    count: int

    def to_json(self):
        return {
            'name': self.name,
            'count': self.count,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            name=str(json['name']),
            count=int(json['count']),
        )
|
||||
|
||||
|
||||
def get_dom_counters() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[int, int, int]]:
    '''
    Returns current DOM object counters.

    :returns: A tuple with the following items:

        0. **documents** -
        1. **nodes** -
        2. **jsEventListeners** -
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Memory.getDOMCounters',
    }
    json = yield cmd_dict
    return (
        int(json['documents']),
        int(json['nodes']),
        int(json['jsEventListeners'])
    )
|
||||
|
||||
|
||||
def get_dom_counters_for_leak_detection() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[DOMCounter]]:
    '''
    Returns DOM object counters after preparing renderer for leak detection.

    :returns: DOM object counters.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Memory.getDOMCountersForLeakDetection',
    }
    json = yield cmd_dict
    return [DOMCounter.from_json(i) for i in json['counters']]
|
||||
|
||||
|
||||
def prepare_for_leak_detection() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Prepares for leak detection by terminating workers, stopping spellcheckers,
    dropping non-essential internal caches, running garbage collections, etc.
    '''
    # No parameters and no result; the browser's reply is discarded.
    yield {
        'method': 'Memory.prepareForLeakDetection',
    }
|
||||
|
||||
|
||||
def forcibly_purge_java_script_memory() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Simulate OomIntervention by purging V8 memory.
    """
    # No parameters; nothing useful comes back in the response.
    yield {'method': 'Memory.forciblyPurgeJavaScriptMemory'}
|
||||
|
||||
|
||||
def set_pressure_notifications_suppressed(
        suppressed: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Enable/disable suppressing memory pressure notifications in all processes.

    :param suppressed: If true, memory pressure notifications will be suppressed.
    """
    request: T_JSON_DICT = {
        'method': 'Memory.setPressureNotificationsSuppressed',
        'params': {'suppressed': suppressed},
    }
    yield request
|
||||
|
||||
|
||||
def simulate_pressure_notification(
        level: PressureLevel
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Simulate a memory pressure notification in all processes.

    :param level: Memory pressure level of the notification.
    """
    request: T_JSON_DICT = {
        'method': 'Memory.simulatePressureNotification',
        'params': {'level': level.to_json()},
    }
    yield request
|
||||
|
||||
|
||||
def start_sampling(
        sampling_interval: typing.Optional[int] = None,
        suppress_randomness: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Start collecting native memory profile.

    :param sampling_interval: *(Optional)* Average number of bytes between samples.
    :param suppress_randomness: *(Optional)* Do not randomize intervals between samples.
    """
    # Only include the optional knobs the caller actually supplied.
    payload: T_JSON_DICT = {}
    if sampling_interval is not None:
        payload['samplingInterval'] = sampling_interval
    if suppress_randomness is not None:
        payload['suppressRandomness'] = suppress_randomness
    yield {'method': 'Memory.startSampling', 'params': payload}
|
||||
|
||||
|
||||
def stop_sampling() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Stop collecting native memory profile.
    """
    yield {'method': 'Memory.stopSampling'}
|
||||
|
||||
|
||||
def get_all_time_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingProfile]:
    """
    Retrieve native memory allocations profile
    collected since renderer process startup.

    :returns:
    """
    response = yield {'method': 'Memory.getAllTimeSamplingProfile'}
    return SamplingProfile.from_json(response['profile'])
|
||||
|
||||
|
||||
def get_browser_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingProfile]:
    """
    Retrieve native memory allocations profile
    collected since browser process startup.

    :returns:
    """
    response = yield {'method': 'Memory.getBrowserSamplingProfile'}
    return SamplingProfile.from_json(response['profile'])
|
||||
|
||||
|
||||
def get_sampling_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SamplingProfile]:
    """
    Retrieve native memory allocations profile collected since last
    ``startSampling`` call.

    :returns:
    """
    response = yield {'method': 'Memory.getSamplingProfile'}
    return SamplingProfile.from_json(response['profile'])
|
4023
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/network.py
Executable file
4023
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/network.py
Executable file
File diff suppressed because it is too large
Load Diff
1383
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/overlay.py
Executable file
1383
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/overlay.py
Executable file
File diff suppressed because it is too large
Load Diff
3837
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/page.py
Executable file
3837
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/page.py
Executable file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,116 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Performance
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
class Metric:
    """
    Run-time execution metric.
    """
    #: Metric name.
    name: str

    #: Metric value.
    value: float

    def to_json(self):
        """Serialize to the CDP wire format."""
        return {'name': self.name, 'value': self.value}

    @classmethod
    def from_json(cls, json):
        """Deserialize from the CDP wire format, coercing types defensively."""
        return cls(name=str(json['name']), value=float(json['value']))
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Disable collecting and reporting metrics.
    """
    yield {'method': 'Performance.disable'}
|
||||
|
||||
|
||||
def enable(
        time_domain: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Enable collecting and reporting metrics.

    :param time_domain: *(Optional)* Time domain to use for collecting and reporting duration metrics.
    """
    options: T_JSON_DICT = {}
    if time_domain is not None:
        options['timeDomain'] = time_domain
    yield {'method': 'Performance.enable', 'params': options}
|
||||
|
||||
|
||||
def set_time_domain(
        time_domain: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    """
    Sets time domain to use for collecting and reporting duration metrics.
    Note that this must be called before enabling metrics collection. Calling
    this method while metrics collection is enabled returns an error.

    **EXPERIMENTAL**

    :param time_domain: Time domain
    """
    request: T_JSON_DICT = {
        'method': 'Performance.setTimeDomain',
        'params': {'timeDomain': time_domain},
    }
    yield request
|
||||
|
||||
|
||||
def get_metrics() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Metric]]:
    """
    Retrieve current values of run-time metrics.

    :returns: Current values for run-time metrics.
    """
    response = yield {'method': 'Performance.getMetrics'}
    return [Metric.from_json(entry) for entry in response['metrics']]
|
||||
|
||||
|
||||
@event_class('Performance.metrics')
@dataclass
class Metrics:
    '''
    Event fired with the current values of run-time metrics
    (``Performance.metrics``).
    '''
    #: Current values of the metrics.
    metrics: typing.List[Metric]
    #: Timestamp title.
    title: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> Metrics:
        # Build the event object from the raw CDP event payload.
        return cls(
            metrics=[Metric.from_json(i) for i in json['metrics']],
            title=str(json['title'])
        )
|
@ -0,0 +1,198 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: PerformanceTimeline (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
@dataclass
class LargestContentfulPaint:
    '''
    See https://github.com/WICG/LargestContentfulPaint and largest_contentful_paint.idl
    '''
    #: Time the LCP candidate was rendered (network.TimeSinceEpoch).
    render_time: network.TimeSinceEpoch

    #: Time the LCP candidate resource finished loading (network.TimeSinceEpoch).
    load_time: network.TimeSinceEpoch

    #: The number of pixels being painted.
    size: float

    #: The id attribute of the element, if available.
    element_id: typing.Optional[str] = None

    #: The URL of the image (may be trimmed).
    url: typing.Optional[str] = None

    #: Backend node id of the painted element, if available.
    node_id: typing.Optional[dom.BackendNodeId] = None

    def to_json(self):
        '''Serialize to the CDP wire format, omitting unset optional fields.'''
        json = dict()
        json['renderTime'] = self.render_time.to_json()
        json['loadTime'] = self.load_time.to_json()
        json['size'] = self.size
        if self.element_id is not None:
            json['elementId'] = self.element_id
        if self.url is not None:
            json['url'] = self.url
        if self.node_id is not None:
            json['nodeId'] = self.node_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from the CDP wire format; absent optional keys become None.'''
        return cls(
            render_time=network.TimeSinceEpoch.from_json(json['renderTime']),
            load_time=network.TimeSinceEpoch.from_json(json['loadTime']),
            size=float(json['size']),
            element_id=str(json['elementId']) if 'elementId' in json else None,
            url=str(json['url']) if 'url' in json else None,
            node_id=dom.BackendNodeId.from_json(json['nodeId']) if 'nodeId' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class LayoutShiftAttribution:
    """
    Attribution of a layout shift to one shifted element: where it was,
    where it moved to, and (optionally) which node it was.
    """
    #: Rect occupied before the shift.
    previous_rect: dom.Rect

    #: Rect occupied after the shift.
    current_rect: dom.Rect

    #: Backend node id of the shifted element, if known.
    node_id: typing.Optional[dom.BackendNodeId] = None

    def to_json(self):
        """Serialize to the CDP wire format, omitting an unset node id."""
        payload = {
            'previousRect': self.previous_rect.to_json(),
            'currentRect': self.current_rect.to_json(),
        }
        if self.node_id is not None:
            payload['nodeId'] = self.node_id.to_json()
        return payload

    @classmethod
    def from_json(cls, json):
        """Deserialize from the CDP wire format."""
        has_node = 'nodeId' in json
        return cls(
            previous_rect=dom.Rect.from_json(json['previousRect']),
            current_rect=dom.Rect.from_json(json['currentRect']),
            node_id=dom.BackendNodeId.from_json(json['nodeId']) if has_node else None,
        )
|
||||
|
||||
|
||||
@dataclass
class LayoutShift:
    """
    See https://wicg.github.io/layout-instability/#sec-layout-shift and layout_shift.idl
    """
    #: Score increment produced by this event.
    value: float

    #: Whether recent user input preceded the shift.
    had_recent_input: bool

    #: Timestamp of the most recent input event.
    last_input_time: network.TimeSinceEpoch

    #: Per-element attributions for this shift.
    sources: typing.List[LayoutShiftAttribution]

    def to_json(self):
        """Serialize to the CDP wire format."""
        return {
            'value': self.value,
            'hadRecentInput': self.had_recent_input,
            'lastInputTime': self.last_input_time.to_json(),
            'sources': [source.to_json() for source in self.sources],
        }

    @classmethod
    def from_json(cls, json):
        """Deserialize from the CDP wire format."""
        return cls(
            value=float(json['value']),
            had_recent_input=bool(json['hadRecentInput']),
            last_input_time=network.TimeSinceEpoch.from_json(json['lastInputTime']),
            sources=[LayoutShiftAttribution.from_json(entry) for entry in json['sources']],
        )
|
||||
|
||||
|
||||
@dataclass
class TimelineEvent:
    '''
    A single performance-timeline entry reported over CDP.
    '''
    #: Identifies the frame that this event is related to. Empty for non-frame targets.
    frame_id: page.FrameId

    #: The event type, as specified in https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype
    #: This determines which of the optional "details" fields is present.
    type_: str

    #: Name may be empty depending on the type.
    name: str

    #: Time in seconds since Epoch, monotonically increasing within document lifetime.
    time: network.TimeSinceEpoch

    #: Event duration, if applicable.
    duration: typing.Optional[float] = None

    #: Present only when type is the LCP entry type.
    lcp_details: typing.Optional[LargestContentfulPaint] = None

    #: Present only when type is the layout-shift entry type.
    layout_shift_details: typing.Optional[LayoutShift] = None

    def to_json(self):
        '''Serialize to the CDP wire format, omitting unset optional fields.'''
        json = dict()
        json['frameId'] = self.frame_id.to_json()
        json['type'] = self.type_
        json['name'] = self.name
        json['time'] = self.time.to_json()
        if self.duration is not None:
            json['duration'] = self.duration
        if self.lcp_details is not None:
            json['lcpDetails'] = self.lcp_details.to_json()
        if self.layout_shift_details is not None:
            json['layoutShiftDetails'] = self.layout_shift_details.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from the CDP wire format; absent optional keys become None.'''
        return cls(
            frame_id=page.FrameId.from_json(json['frameId']),
            type_=str(json['type']),
            name=str(json['name']),
            time=network.TimeSinceEpoch.from_json(json['time']),
            duration=float(json['duration']) if 'duration' in json else None,
            lcp_details=LargestContentfulPaint.from_json(json['lcpDetails']) if 'lcpDetails' in json else None,
            layout_shift_details=LayoutShift.from_json(json['layoutShiftDetails']) if 'layoutShiftDetails' in json else None,
        )
|
||||
|
||||
|
||||
def enable(
        event_types: typing.List[str]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Previously buffered events would be reported before method returns.
    See also: timelineEventAdded

    :param event_types: The types of event to report, as specified in https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype The specified filter overrides any previous filters, passing empty filter disables recording. Note that not all types exposed to the web platform are currently supported.
    '''
    params: T_JSON_DICT = dict()
    # list() copies the caller's sequence in one step (idiomatic form of
    # the identity comprehension) so later mutation can't affect the command.
    params['eventTypes'] = list(event_types)
    cmd_dict: T_JSON_DICT = {
        'method': 'PerformanceTimeline.enable',
        'params': params,
    }
    json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('PerformanceTimeline.timelineEventAdded')
@dataclass
class TimelineEventAdded:
    '''
    Sent when a performance timeline event is added. See reportPerformanceTimeline method.
    '''
    #: The newly added timeline entry.
    event: TimelineEvent

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TimelineEventAdded:
        # Build the event object from the raw CDP event payload.
        return cls(
            event=TimelineEvent.from_json(json['event'])
        )
|
526
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/preload.py
Executable file
526
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/preload.py
Executable file
@ -0,0 +1,526 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Preload (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import dom
|
||||
from . import network
|
||||
from . import page
|
||||
|
||||
|
||||
class RuleSetId(str):
    """
    Unique id
    """

    def to_json(self) -> str:
        # The wire representation is the string itself.
        return self

    @classmethod
    def from_json(cls, json: str) -> RuleSetId:
        return cls(json)

    def __repr__(self):
        return f'RuleSetId({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class RuleSet:
    '''
    Corresponds to SpeculationRuleSet
    '''
    #: Unique id of this rule set.
    id_: RuleSetId

    #: Identifies a document which the rule set is associated with.
    loader_id: network.LoaderId

    #: Source text of JSON representing the rule set. If it comes from
    #: ``script`` tag, it is the textContent of the node. Note that it is
    #: a JSON for valid case.
    #:
    #: See also:
    #: - https://wicg.github.io/nav-speculation/speculation-rules.html
    #: - https://github.com/WICG/nav-speculation/blob/main/triggers.md
    source_text: str

    #: A speculation rule set is either added through an inline
    #: ``script`` tag or through an external resource via the
    #: 'Speculation-Rules' HTTP header. For the first case, we include
    #: the BackendNodeId of the relevant ``script`` tag. For the second
    #: case, we include the external URL where the rule set was loaded
    #: from, and also RequestId if Network domain is enabled.
    #:
    #: See also:
    #: - https://wicg.github.io/nav-speculation/speculation-rules.html#speculation-rules-script
    #: - https://wicg.github.io/nav-speculation/speculation-rules.html#speculation-rules-header
    backend_node_id: typing.Optional[dom.BackendNodeId] = None

    #: External URL the rule set was loaded from (header case).
    url: typing.Optional[str] = None

    #: Network request id, present when the Network domain is enabled.
    request_id: typing.Optional[network.RequestId] = None

    #: Error information
    #: ``errorMessage`` is null iff ``errorType`` is null.
    error_type: typing.Optional[RuleSetErrorType] = None

    #: TODO(https://crbug.com/1425354): Replace this property with structured error.
    error_message: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to the CDP wire format, omitting unset optional fields.'''
        json = dict()
        json['id'] = self.id_.to_json()
        json['loaderId'] = self.loader_id.to_json()
        json['sourceText'] = self.source_text
        if self.backend_node_id is not None:
            json['backendNodeId'] = self.backend_node_id.to_json()
        if self.url is not None:
            json['url'] = self.url
        if self.request_id is not None:
            json['requestId'] = self.request_id.to_json()
        if self.error_type is not None:
            json['errorType'] = self.error_type.to_json()
        if self.error_message is not None:
            json['errorMessage'] = self.error_message
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from the CDP wire format; absent optional keys become None.'''
        return cls(
            id_=RuleSetId.from_json(json['id']),
            loader_id=network.LoaderId.from_json(json['loaderId']),
            source_text=str(json['sourceText']),
            backend_node_id=dom.BackendNodeId.from_json(json['backendNodeId']) if 'backendNodeId' in json else None,
            url=str(json['url']) if 'url' in json else None,
            request_id=network.RequestId.from_json(json['requestId']) if 'requestId' in json else None,
            error_type=RuleSetErrorType.from_json(json['errorType']) if 'errorType' in json else None,
            error_message=str(json['errorMessage']) if 'errorMessage' in json else None,
        )
|
||||
|
||||
|
||||
class RuleSetErrorType(enum.Enum):
    '''
    Reason a speculation rule set could not be (fully) parsed.
    '''
    SOURCE_IS_NOT_JSON_OBJECT = "SourceIsNotJsonObject"
    INVALID_RULES_SKIPPED = "InvalidRulesSkipped"

    def to_json(self):
        # The wire format is the raw string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
class SpeculationAction(enum.Enum):
    '''
    The type of preloading attempted. It corresponds to
    mojom::SpeculationAction (although PrefetchWithSubresources is omitted as it
    isn't being used by clients).
    '''
    PREFETCH = "Prefetch"
    PRERENDER = "Prerender"

    def to_json(self):
        # The wire format is the raw string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
class SpeculationTargetHint(enum.Enum):
    '''
    Corresponds to mojom::SpeculationTargetHint.
    See https://github.com/WICG/nav-speculation/blob/main/triggers.md#window-name-targeting-hints
    '''
    BLANK = "Blank"
    SELF = "Self"

    def to_json(self):
        # The wire format is the raw string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class PreloadingAttemptKey:
    """
    A key that identifies a preloading attempt.

    The url used is the url specified by the trigger (i.e. the initial URL), and
    not the final url that is navigated to. For example, prerendering allows
    same-origin main frame navigations during the attempt, but the attempt is
    still keyed with the initial URL.
    """
    #: Document the attempt belongs to.
    loader_id: network.LoaderId

    #: Kind of preloading attempted (prefetch / prerender).
    action: SpeculationAction

    #: Initial URL specified by the trigger.
    url: str

    #: Optional target hint from the speculation rule.
    target_hint: typing.Optional[SpeculationTargetHint] = None

    def to_json(self):
        """Serialize to the CDP wire format, omitting an unset target hint."""
        payload = {
            'loaderId': self.loader_id.to_json(),
            'action': self.action.to_json(),
            'url': self.url,
        }
        if self.target_hint is not None:
            payload['targetHint'] = self.target_hint.to_json()
        return payload

    @classmethod
    def from_json(cls, json):
        """Deserialize from the CDP wire format."""
        has_hint = 'targetHint' in json
        return cls(
            loader_id=network.LoaderId.from_json(json['loaderId']),
            action=SpeculationAction.from_json(json['action']),
            url=str(json['url']),
            target_hint=SpeculationTargetHint.from_json(json['targetHint']) if has_hint else None,
        )
|
||||
|
||||
|
||||
@dataclass
class PreloadingAttemptSource:
    '''
    Lists sources for a preloading attempt, specifically the ids of rule sets
    that had a speculation rule that triggered the attempt, and the
    BackendNodeIds of <a href> or <area href> elements that triggered the
    attempt (in the case of attempts triggered by a document rule). It is
    possible for multiple rule sets and links to trigger a single attempt.
    '''
    #: Key identifying the preloading attempt.
    key: PreloadingAttemptKey

    #: Rule sets whose rules triggered the attempt.
    rule_set_ids: typing.List[RuleSetId]

    #: Link elements that triggered the attempt (document rules).
    node_ids: typing.List[dom.BackendNodeId]

    def to_json(self):
        '''Serialize to the CDP wire format.'''
        json = dict()
        json['key'] = self.key.to_json()
        json['ruleSetIds'] = [i.to_json() for i in self.rule_set_ids]
        json['nodeIds'] = [i.to_json() for i in self.node_ids]
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from the CDP wire format.'''
        return cls(
            key=PreloadingAttemptKey.from_json(json['key']),
            rule_set_ids=[RuleSetId.from_json(i) for i in json['ruleSetIds']],
            node_ids=[dom.BackendNodeId.from_json(i) for i in json['nodeIds']],
        )
|
||||
|
||||
|
||||
class PrerenderFinalStatus(enum.Enum):
    '''
    List of FinalStatus reasons for Prerender2.
    Values mirror the CDP wire strings one-to-one.
    '''
    ACTIVATED = "Activated"
    DESTROYED = "Destroyed"
    LOW_END_DEVICE = "LowEndDevice"
    INVALID_SCHEME_REDIRECT = "InvalidSchemeRedirect"
    INVALID_SCHEME_NAVIGATION = "InvalidSchemeNavigation"
    NAVIGATION_REQUEST_BLOCKED_BY_CSP = "NavigationRequestBlockedByCsp"
    MAIN_FRAME_NAVIGATION = "MainFrameNavigation"
    MOJO_BINDER_POLICY = "MojoBinderPolicy"
    RENDERER_PROCESS_CRASHED = "RendererProcessCrashed"
    RENDERER_PROCESS_KILLED = "RendererProcessKilled"
    DOWNLOAD = "Download"
    TRIGGER_DESTROYED = "TriggerDestroyed"
    NAVIGATION_NOT_COMMITTED = "NavigationNotCommitted"
    NAVIGATION_BAD_HTTP_STATUS = "NavigationBadHttpStatus"
    CLIENT_CERT_REQUESTED = "ClientCertRequested"
    NAVIGATION_REQUEST_NETWORK_ERROR = "NavigationRequestNetworkError"
    CANCEL_ALL_HOSTS_FOR_TESTING = "CancelAllHostsForTesting"
    DID_FAIL_LOAD = "DidFailLoad"
    STOP = "Stop"
    SSL_CERTIFICATE_ERROR = "SslCertificateError"
    LOGIN_AUTH_REQUESTED = "LoginAuthRequested"
    UA_CHANGE_REQUIRES_RELOAD = "UaChangeRequiresReload"
    BLOCKED_BY_CLIENT = "BlockedByClient"
    AUDIO_OUTPUT_DEVICE_REQUESTED = "AudioOutputDeviceRequested"
    MIXED_CONTENT = "MixedContent"
    TRIGGER_BACKGROUNDED = "TriggerBackgrounded"
    MEMORY_LIMIT_EXCEEDED = "MemoryLimitExceeded"
    DATA_SAVER_ENABLED = "DataSaverEnabled"
    TRIGGER_URL_HAS_EFFECTIVE_URL = "TriggerUrlHasEffectiveUrl"
    ACTIVATED_BEFORE_STARTED = "ActivatedBeforeStarted"
    INACTIVE_PAGE_RESTRICTION = "InactivePageRestriction"
    START_FAILED = "StartFailed"
    TIMEOUT_BACKGROUNDED = "TimeoutBackgrounded"
    CROSS_SITE_REDIRECT_IN_INITIAL_NAVIGATION = "CrossSiteRedirectInInitialNavigation"
    CROSS_SITE_NAVIGATION_IN_INITIAL_NAVIGATION = "CrossSiteNavigationInInitialNavigation"
    SAME_SITE_CROSS_ORIGIN_REDIRECT_NOT_OPT_IN_IN_INITIAL_NAVIGATION = "SameSiteCrossOriginRedirectNotOptInInInitialNavigation"
    SAME_SITE_CROSS_ORIGIN_NAVIGATION_NOT_OPT_IN_IN_INITIAL_NAVIGATION = "SameSiteCrossOriginNavigationNotOptInInInitialNavigation"
    ACTIVATION_NAVIGATION_PARAMETER_MISMATCH = "ActivationNavigationParameterMismatch"
    ACTIVATED_IN_BACKGROUND = "ActivatedInBackground"
    EMBEDDER_HOST_DISALLOWED = "EmbedderHostDisallowed"
    ACTIVATION_NAVIGATION_DESTROYED_BEFORE_SUCCESS = "ActivationNavigationDestroyedBeforeSuccess"
    TAB_CLOSED_BY_USER_GESTURE = "TabClosedByUserGesture"
    TAB_CLOSED_WITHOUT_USER_GESTURE = "TabClosedWithoutUserGesture"
    PRIMARY_MAIN_FRAME_RENDERER_PROCESS_CRASHED = "PrimaryMainFrameRendererProcessCrashed"
    PRIMARY_MAIN_FRAME_RENDERER_PROCESS_KILLED = "PrimaryMainFrameRendererProcessKilled"
    ACTIVATION_FRAME_POLICY_NOT_COMPATIBLE = "ActivationFramePolicyNotCompatible"
    PRELOADING_DISABLED = "PreloadingDisabled"
    BATTERY_SAVER_ENABLED = "BatterySaverEnabled"
    ACTIVATED_DURING_MAIN_FRAME_NAVIGATION = "ActivatedDuringMainFrameNavigation"
    PRELOADING_UNSUPPORTED_BY_WEB_CONTENTS = "PreloadingUnsupportedByWebContents"
    CROSS_SITE_REDIRECT_IN_MAIN_FRAME_NAVIGATION = "CrossSiteRedirectInMainFrameNavigation"
    CROSS_SITE_NAVIGATION_IN_MAIN_FRAME_NAVIGATION = "CrossSiteNavigationInMainFrameNavigation"
    SAME_SITE_CROSS_ORIGIN_REDIRECT_NOT_OPT_IN_IN_MAIN_FRAME_NAVIGATION = "SameSiteCrossOriginRedirectNotOptInInMainFrameNavigation"
    SAME_SITE_CROSS_ORIGIN_NAVIGATION_NOT_OPT_IN_IN_MAIN_FRAME_NAVIGATION = "SameSiteCrossOriginNavigationNotOptInInMainFrameNavigation"
    MEMORY_PRESSURE_ON_TRIGGER = "MemoryPressureOnTrigger"
    MEMORY_PRESSURE_AFTER_TRIGGERED = "MemoryPressureAfterTriggered"
    PRERENDERING_DISABLED_BY_DEV_TOOLS = "PrerenderingDisabledByDevTools"
    SPECULATION_RULE_REMOVED = "SpeculationRuleRemoved"
    ACTIVATED_WITH_AUXILIARY_BROWSING_CONTEXTS = "ActivatedWithAuxiliaryBrowsingContexts"
    MAX_NUM_OF_RUNNING_EAGER_PRERENDERS_EXCEEDED = "MaxNumOfRunningEagerPrerendersExceeded"
    MAX_NUM_OF_RUNNING_NON_EAGER_PRERENDERS_EXCEEDED = "MaxNumOfRunningNonEagerPrerendersExceeded"
    MAX_NUM_OF_RUNNING_EMBEDDER_PRERENDERS_EXCEEDED = "MaxNumOfRunningEmbedderPrerendersExceeded"
    PRERENDERING_URL_HAS_EFFECTIVE_URL = "PrerenderingUrlHasEffectiveUrl"
    REDIRECTED_PRERENDERING_URL_HAS_EFFECTIVE_URL = "RedirectedPrerenderingUrlHasEffectiveUrl"
    ACTIVATION_URL_HAS_EFFECTIVE_URL = "ActivationUrlHasEffectiveUrl"
    JAVA_SCRIPT_INTERFACE_ADDED = "JavaScriptInterfaceAdded"
    JAVA_SCRIPT_INTERFACE_REMOVED = "JavaScriptInterfaceRemoved"
    ALL_PRERENDERING_CANCELED = "AllPrerenderingCanceled"
    WINDOW_CLOSED = "WindowClosed"
    SLOW_NETWORK = "SlowNetwork"
    OTHER_PRERENDERED_PAGE_ACTIVATED = "OtherPrerenderedPageActivated"

    def to_json(self):
        # The wire format is the raw string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
class PreloadingStatus(enum.Enum):
    '''
    Preloading status values, see also PreloadingTriggeringOutcome. This
    status is shared by prefetchStatusUpdated and prerenderStatusUpdated.
    '''
    PENDING = "Pending"
    RUNNING = "Running"
    READY = "Ready"
    SUCCESS = "Success"
    FAILURE = "Failure"
    NOT_SUPPORTED = "NotSupported"

    def to_json(self):
        # The wire format is the raw string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
class PrefetchStatus(enum.Enum):
    '''
    TODO(https://crbug.com/1384419): revisit the list of PrefetchStatus and
    filter out the ones that aren't necessary to the developers.
    Values mirror the CDP wire strings one-to-one.
    '''
    PREFETCH_ALLOWED = "PrefetchAllowed"
    PREFETCH_FAILED_INELIGIBLE_REDIRECT = "PrefetchFailedIneligibleRedirect"
    PREFETCH_FAILED_INVALID_REDIRECT = "PrefetchFailedInvalidRedirect"
    PREFETCH_FAILED_MIME_NOT_SUPPORTED = "PrefetchFailedMIMENotSupported"
    PREFETCH_FAILED_NET_ERROR = "PrefetchFailedNetError"
    PREFETCH_FAILED_NON2_XX = "PrefetchFailedNon2XX"
    PREFETCH_EVICTED_AFTER_CANDIDATE_REMOVED = "PrefetchEvictedAfterCandidateRemoved"
    PREFETCH_EVICTED_FOR_NEWER_PREFETCH = "PrefetchEvictedForNewerPrefetch"
    PREFETCH_HELDBACK = "PrefetchHeldback"
    PREFETCH_INELIGIBLE_RETRY_AFTER = "PrefetchIneligibleRetryAfter"
    PREFETCH_IS_PRIVACY_DECOY = "PrefetchIsPrivacyDecoy"
    PREFETCH_IS_STALE = "PrefetchIsStale"
    PREFETCH_NOT_ELIGIBLE_BROWSER_CONTEXT_OFF_THE_RECORD = "PrefetchNotEligibleBrowserContextOffTheRecord"
    PREFETCH_NOT_ELIGIBLE_DATA_SAVER_ENABLED = "PrefetchNotEligibleDataSaverEnabled"
    PREFETCH_NOT_ELIGIBLE_EXISTING_PROXY = "PrefetchNotEligibleExistingProxy"
    PREFETCH_NOT_ELIGIBLE_HOST_IS_NON_UNIQUE = "PrefetchNotEligibleHostIsNonUnique"
    PREFETCH_NOT_ELIGIBLE_NON_DEFAULT_STORAGE_PARTITION = "PrefetchNotEligibleNonDefaultStoragePartition"
    PREFETCH_NOT_ELIGIBLE_SAME_SITE_CROSS_ORIGIN_PREFETCH_REQUIRED_PROXY = "PrefetchNotEligibleSameSiteCrossOriginPrefetchRequiredProxy"
    PREFETCH_NOT_ELIGIBLE_SCHEME_IS_NOT_HTTPS = "PrefetchNotEligibleSchemeIsNotHttps"
    PREFETCH_NOT_ELIGIBLE_USER_HAS_COOKIES = "PrefetchNotEligibleUserHasCookies"
    PREFETCH_NOT_ELIGIBLE_USER_HAS_SERVICE_WORKER = "PrefetchNotEligibleUserHasServiceWorker"
    PREFETCH_NOT_ELIGIBLE_BATTERY_SAVER_ENABLED = "PrefetchNotEligibleBatterySaverEnabled"
    PREFETCH_NOT_ELIGIBLE_PRELOADING_DISABLED = "PrefetchNotEligiblePreloadingDisabled"
    PREFETCH_NOT_FINISHED_IN_TIME = "PrefetchNotFinishedInTime"
    PREFETCH_NOT_STARTED = "PrefetchNotStarted"
    PREFETCH_NOT_USED_COOKIES_CHANGED = "PrefetchNotUsedCookiesChanged"
    PREFETCH_PROXY_NOT_AVAILABLE = "PrefetchProxyNotAvailable"
    PREFETCH_RESPONSE_USED = "PrefetchResponseUsed"
    PREFETCH_SUCCESSFUL_BUT_NOT_USED = "PrefetchSuccessfulButNotUsed"
    PREFETCH_NOT_USED_PROBE_FAILED = "PrefetchNotUsedProbeFailed"

    def to_json(self):
        # The wire format is the raw string value.
        return self.value

    @classmethod
    def from_json(cls, json):
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class PrerenderMismatchedHeaders:
    """
    Information of headers to be displayed when the header mismatch occurred.
    """
    #: Name of the mismatched header.
    header_name: str

    #: Header value seen at prerender time, if any.
    initial_value: typing.Optional[str] = None

    #: Header value seen at activation time, if any.
    activation_value: typing.Optional[str] = None

    def to_json(self):
        """Serialize to the CDP wire format, omitting unset optional fields."""
        payload = {'headerName': self.header_name}
        if self.initial_value is not None:
            payload['initialValue'] = self.initial_value
        if self.activation_value is not None:
            payload['activationValue'] = self.activation_value
        return payload

    @classmethod
    def from_json(cls, json):
        """Deserialize from the CDP wire format; absent optional keys become None."""
        return cls(
            header_name=str(json['headerName']),
            initial_value=str(json['initialValue']) if 'initialValue' in json else None,
            activation_value=str(json['activationValue']) if 'activationValue' in json else None,
        )
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Preload.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Preload.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Preload.ruleSetUpdated')
|
||||
@dataclass
|
||||
class RuleSetUpdated:
|
||||
'''
|
||||
Upsert. Currently, it is only emitted when a rule set added.
|
||||
'''
|
||||
rule_set: RuleSet
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> RuleSetUpdated:
|
||||
return cls(
|
||||
rule_set=RuleSet.from_json(json['ruleSet'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Preload.ruleSetRemoved')
|
||||
@dataclass
|
||||
class RuleSetRemoved:
|
||||
id_: RuleSetId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> RuleSetRemoved:
|
||||
return cls(
|
||||
id_=RuleSetId.from_json(json['id'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Preload.preloadEnabledStateUpdated')
|
||||
@dataclass
|
||||
class PreloadEnabledStateUpdated:
|
||||
'''
|
||||
Fired when a preload enabled state is updated.
|
||||
'''
|
||||
disabled_by_preference: bool
|
||||
disabled_by_data_saver: bool
|
||||
disabled_by_battery_saver: bool
|
||||
disabled_by_holdback_prefetch_speculation_rules: bool
|
||||
disabled_by_holdback_prerender_speculation_rules: bool
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PreloadEnabledStateUpdated:
|
||||
return cls(
|
||||
disabled_by_preference=bool(json['disabledByPreference']),
|
||||
disabled_by_data_saver=bool(json['disabledByDataSaver']),
|
||||
disabled_by_battery_saver=bool(json['disabledByBatterySaver']),
|
||||
disabled_by_holdback_prefetch_speculation_rules=bool(json['disabledByHoldbackPrefetchSpeculationRules']),
|
||||
disabled_by_holdback_prerender_speculation_rules=bool(json['disabledByHoldbackPrerenderSpeculationRules'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Preload.prefetchStatusUpdated')
|
||||
@dataclass
|
||||
class PrefetchStatusUpdated:
|
||||
'''
|
||||
Fired when a prefetch attempt is updated.
|
||||
'''
|
||||
key: PreloadingAttemptKey
|
||||
#: The frame id of the frame initiating prefetch.
|
||||
initiating_frame_id: page.FrameId
|
||||
prefetch_url: str
|
||||
status: PreloadingStatus
|
||||
prefetch_status: PrefetchStatus
|
||||
request_id: network.RequestId
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PrefetchStatusUpdated:
|
||||
return cls(
|
||||
key=PreloadingAttemptKey.from_json(json['key']),
|
||||
initiating_frame_id=page.FrameId.from_json(json['initiatingFrameId']),
|
||||
prefetch_url=str(json['prefetchUrl']),
|
||||
status=PreloadingStatus.from_json(json['status']),
|
||||
prefetch_status=PrefetchStatus.from_json(json['prefetchStatus']),
|
||||
request_id=network.RequestId.from_json(json['requestId'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Preload.prerenderStatusUpdated')
|
||||
@dataclass
|
||||
class PrerenderStatusUpdated:
|
||||
'''
|
||||
Fired when a prerender attempt is updated.
|
||||
'''
|
||||
key: PreloadingAttemptKey
|
||||
status: PreloadingStatus
|
||||
prerender_status: typing.Optional[PrerenderFinalStatus]
|
||||
#: This is used to give users more information about the name of Mojo interface
|
||||
#: that is incompatible with prerender and has caused the cancellation of the attempt.
|
||||
disallowed_mojo_interface: typing.Optional[str]
|
||||
mismatched_headers: typing.Optional[typing.List[PrerenderMismatchedHeaders]]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PrerenderStatusUpdated:
|
||||
return cls(
|
||||
key=PreloadingAttemptKey.from_json(json['key']),
|
||||
status=PreloadingStatus.from_json(json['status']),
|
||||
prerender_status=PrerenderFinalStatus.from_json(json['prerenderStatus']) if 'prerenderStatus' in json else None,
|
||||
disallowed_mojo_interface=str(json['disallowedMojoInterface']) if 'disallowedMojoInterface' in json else None,
|
||||
mismatched_headers=[PrerenderMismatchedHeaders.from_json(i) for i in json['mismatchedHeaders']] if 'mismatchedHeaders' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Preload.preloadingAttemptSourcesUpdated')
|
||||
@dataclass
|
||||
class PreloadingAttemptSourcesUpdated:
|
||||
'''
|
||||
Send a list of sources for all preloading attempts in a document.
|
||||
'''
|
||||
loader_id: network.LoaderId
|
||||
preloading_attempt_sources: typing.List[PreloadingAttemptSource]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PreloadingAttemptSourcesUpdated:
|
||||
return cls(
|
||||
loader_id=network.LoaderId.from_json(json['loaderId']),
|
||||
preloading_attempt_sources=[PreloadingAttemptSource.from_json(i) for i in json['preloadingAttemptSources']]
|
||||
)
|
418
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/profiler.py
Executable file
418
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/profiler.py
Executable file
@ -0,0 +1,418 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Profiler
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import debugger
|
||||
from . import runtime
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProfileNode:
|
||||
'''
|
||||
Profile node. Holds callsite information, execution statistics and child nodes.
|
||||
'''
|
||||
#: Unique id of the node.
|
||||
id_: int
|
||||
|
||||
#: Function location.
|
||||
call_frame: runtime.CallFrame
|
||||
|
||||
#: Number of samples where this node was on top of the call stack.
|
||||
hit_count: typing.Optional[int] = None
|
||||
|
||||
#: Child node ids.
|
||||
children: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: The reason of being not optimized. The function may be deoptimized or marked as don't
|
||||
#: optimize.
|
||||
deopt_reason: typing.Optional[str] = None
|
||||
|
||||
#: An array of source position ticks.
|
||||
position_ticks: typing.Optional[typing.List[PositionTickInfo]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['id'] = self.id_
|
||||
json['callFrame'] = self.call_frame.to_json()
|
||||
if self.hit_count is not None:
|
||||
json['hitCount'] = self.hit_count
|
||||
if self.children is not None:
|
||||
json['children'] = [i for i in self.children]
|
||||
if self.deopt_reason is not None:
|
||||
json['deoptReason'] = self.deopt_reason
|
||||
if self.position_ticks is not None:
|
||||
json['positionTicks'] = [i.to_json() for i in self.position_ticks]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
id_=int(json['id']),
|
||||
call_frame=runtime.CallFrame.from_json(json['callFrame']),
|
||||
hit_count=int(json['hitCount']) if 'hitCount' in json else None,
|
||||
children=[int(i) for i in json['children']] if 'children' in json else None,
|
||||
deopt_reason=str(json['deoptReason']) if 'deoptReason' in json else None,
|
||||
position_ticks=[PositionTickInfo.from_json(i) for i in json['positionTicks']] if 'positionTicks' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Profile:
|
||||
'''
|
||||
Profile.
|
||||
'''
|
||||
#: The list of profile nodes. First item is the root node.
|
||||
nodes: typing.List[ProfileNode]
|
||||
|
||||
#: Profiling start timestamp in microseconds.
|
||||
start_time: float
|
||||
|
||||
#: Profiling end timestamp in microseconds.
|
||||
end_time: float
|
||||
|
||||
#: Ids of samples top nodes.
|
||||
samples: typing.Optional[typing.List[int]] = None
|
||||
|
||||
#: Time intervals between adjacent samples in microseconds. The first delta is relative to the
|
||||
#: profile startTime.
|
||||
time_deltas: typing.Optional[typing.List[int]] = None
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['nodes'] = [i.to_json() for i in self.nodes]
|
||||
json['startTime'] = self.start_time
|
||||
json['endTime'] = self.end_time
|
||||
if self.samples is not None:
|
||||
json['samples'] = [i for i in self.samples]
|
||||
if self.time_deltas is not None:
|
||||
json['timeDeltas'] = [i for i in self.time_deltas]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
nodes=[ProfileNode.from_json(i) for i in json['nodes']],
|
||||
start_time=float(json['startTime']),
|
||||
end_time=float(json['endTime']),
|
||||
samples=[int(i) for i in json['samples']] if 'samples' in json else None,
|
||||
time_deltas=[int(i) for i in json['timeDeltas']] if 'timeDeltas' in json else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PositionTickInfo:
|
||||
'''
|
||||
Specifies a number of samples attributed to a certain source position.
|
||||
'''
|
||||
#: Source line number (1-based).
|
||||
line: int
|
||||
|
||||
#: Number of samples attributed to the source line.
|
||||
ticks: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['line'] = self.line
|
||||
json['ticks'] = self.ticks
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
line=int(json['line']),
|
||||
ticks=int(json['ticks']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class CoverageRange:
|
||||
'''
|
||||
Coverage data for a source range.
|
||||
'''
|
||||
#: JavaScript script source offset for the range start.
|
||||
start_offset: int
|
||||
|
||||
#: JavaScript script source offset for the range end.
|
||||
end_offset: int
|
||||
|
||||
#: Collected execution count of the source range.
|
||||
count: int
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['startOffset'] = self.start_offset
|
||||
json['endOffset'] = self.end_offset
|
||||
json['count'] = self.count
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
start_offset=int(json['startOffset']),
|
||||
end_offset=int(json['endOffset']),
|
||||
count=int(json['count']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FunctionCoverage:
|
||||
'''
|
||||
Coverage data for a JavaScript function.
|
||||
'''
|
||||
#: JavaScript function name.
|
||||
function_name: str
|
||||
|
||||
#: Source ranges inside the function with coverage data.
|
||||
ranges: typing.List[CoverageRange]
|
||||
|
||||
#: Whether coverage data for this function has block granularity.
|
||||
is_block_coverage: bool
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['functionName'] = self.function_name
|
||||
json['ranges'] = [i.to_json() for i in self.ranges]
|
||||
json['isBlockCoverage'] = self.is_block_coverage
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
function_name=str(json['functionName']),
|
||||
ranges=[CoverageRange.from_json(i) for i in json['ranges']],
|
||||
is_block_coverage=bool(json['isBlockCoverage']),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScriptCoverage:
|
||||
'''
|
||||
Coverage data for a JavaScript script.
|
||||
'''
|
||||
#: JavaScript script id.
|
||||
script_id: runtime.ScriptId
|
||||
|
||||
#: JavaScript script name or url.
|
||||
url: str
|
||||
|
||||
#: Functions contained in the script that has coverage data.
|
||||
functions: typing.List[FunctionCoverage]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['scriptId'] = self.script_id.to_json()
|
||||
json['url'] = self.url
|
||||
json['functions'] = [i.to_json() for i in self.functions]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
script_id=runtime.ScriptId.from_json(json['scriptId']),
|
||||
url=str(json['url']),
|
||||
functions=[FunctionCoverage.from_json(i) for i in json['functions']],
|
||||
)
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.disable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.enable',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def get_best_effort_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ScriptCoverage]]:
|
||||
'''
|
||||
Collect coverage data for the current isolate. The coverage data may be incomplete due to
|
||||
garbage collection.
|
||||
|
||||
:returns: Coverage data for the current isolate.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.getBestEffortCoverage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [ScriptCoverage.from_json(i) for i in json['result']]
|
||||
|
||||
|
||||
def set_sampling_interval(
|
||||
interval: int
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Changes CPU profiler sampling interval. Must be called before CPU profiles recording started.
|
||||
|
||||
:param interval: New sampling interval in microseconds.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['interval'] = interval
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.setSamplingInterval',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.start',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def start_precise_coverage(
|
||||
call_count: typing.Optional[bool] = None,
|
||||
detailed: typing.Optional[bool] = None,
|
||||
allow_triggered_updates: typing.Optional[bool] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
|
||||
'''
|
||||
Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code
|
||||
coverage may be incomplete. Enabling prevents running optimized code and resets execution
|
||||
counters.
|
||||
|
||||
:param call_count: *(Optional)* Collect accurate call counts beyond simple 'covered' or 'not covered'.
|
||||
:param detailed: *(Optional)* Collect block-based coverage.
|
||||
:param allow_triggered_updates: *(Optional)* Allow the backend to send updates on its own initiative
|
||||
:returns: Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
if call_count is not None:
|
||||
params['callCount'] = call_count
|
||||
if detailed is not None:
|
||||
params['detailed'] = detailed
|
||||
if allow_triggered_updates is not None:
|
||||
params['allowTriggeredUpdates'] = allow_triggered_updates
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.startPreciseCoverage',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return float(json['timestamp'])
|
||||
|
||||
|
||||
def stop() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Profile]:
|
||||
'''
|
||||
|
||||
|
||||
:returns: Recorded profile.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.stop',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return Profile.from_json(json['profile'])
|
||||
|
||||
|
||||
def stop_precise_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Disable precise code coverage. Disabling releases unnecessary execution count records and allows
|
||||
executing optimized code.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.stopPreciseCoverage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def take_precise_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[ScriptCoverage], float]]:
|
||||
'''
|
||||
Collect coverage data for the current isolate, and resets execution counters. Precise code
|
||||
coverage needs to have started.
|
||||
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **result** - Coverage data for the current isolate.
|
||||
1. **timestamp** - Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Profiler.takePreciseCoverage',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
[ScriptCoverage.from_json(i) for i in json['result']],
|
||||
float(json['timestamp'])
|
||||
)
|
||||
|
||||
|
||||
@event_class('Profiler.consoleProfileFinished')
|
||||
@dataclass
|
||||
class ConsoleProfileFinished:
|
||||
id_: str
|
||||
#: Location of console.profileEnd().
|
||||
location: debugger.Location
|
||||
profile: Profile
|
||||
#: Profile title passed as an argument to console.profile().
|
||||
title: typing.Optional[str]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileFinished:
|
||||
return cls(
|
||||
id_=str(json['id']),
|
||||
location=debugger.Location.from_json(json['location']),
|
||||
profile=Profile.from_json(json['profile']),
|
||||
title=str(json['title']) if 'title' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Profiler.consoleProfileStarted')
|
||||
@dataclass
|
||||
class ConsoleProfileStarted:
|
||||
'''
|
||||
Sent when new profile recording is started using console.profile() call.
|
||||
'''
|
||||
id_: str
|
||||
#: Location of console.profile().
|
||||
location: debugger.Location
|
||||
#: Profile title passed as an argument to console.profile().
|
||||
title: typing.Optional[str]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileStarted:
|
||||
return cls(
|
||||
id_=str(json['id']),
|
||||
location=debugger.Location.from_json(json['location']),
|
||||
title=str(json['title']) if 'title' in json else None
|
||||
)
|
||||
|
||||
|
||||
@event_class('Profiler.preciseCoverageDeltaUpdate')
|
||||
@dataclass
|
||||
class PreciseCoverageDeltaUpdate:
|
||||
'''
|
||||
**EXPERIMENTAL**
|
||||
|
||||
Reports coverage delta since the last poll (either from an event like this, or from
|
||||
``takePreciseCoverage`` for the current isolate. May only be sent if precise code
|
||||
coverage has been started. This event can be trigged by the embedder to, for example,
|
||||
trigger collection of coverage data immediately at a certain point in time.
|
||||
'''
|
||||
#: Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
|
||||
timestamp: float
|
||||
#: Identifier for distinguishing coverage events.
|
||||
occasion: str
|
||||
#: Coverage data for the current isolate.
|
||||
result: typing.List[ScriptCoverage]
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: T_JSON_DICT) -> PreciseCoverageDeltaUpdate:
|
||||
return cls(
|
||||
timestamp=float(json['timestamp']),
|
||||
occasion=str(json['occasion']),
|
||||
result=[ScriptCoverage.from_json(i) for i in json['result']]
|
||||
)
|
260
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/pwa.py
Executable file
260
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/pwa.py
Executable file
@ -0,0 +1,260 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: PWA (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import target
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileHandlerAccept:
|
||||
'''
|
||||
The following types are the replica of
|
||||
https://crsrc.org/c/chrome/browser/web_applications/proto/web_app_os_integration_state.proto;drc=9910d3be894c8f142c977ba1023f30a656bc13fc;l=67
|
||||
'''
|
||||
#: New name of the mimetype according to
|
||||
#: https://www.iana.org/assignments/media-types/media-types.xhtml
|
||||
media_type: str
|
||||
|
||||
file_extensions: typing.List[str]
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['mediaType'] = self.media_type
|
||||
json['fileExtensions'] = [i for i in self.file_extensions]
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
media_type=str(json['mediaType']),
|
||||
file_extensions=[str(i) for i in json['fileExtensions']],
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileHandler:
|
||||
action: str
|
||||
|
||||
accepts: typing.List[FileHandlerAccept]
|
||||
|
||||
display_name: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['action'] = self.action
|
||||
json['accepts'] = [i.to_json() for i in self.accepts]
|
||||
json['displayName'] = self.display_name
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
action=str(json['action']),
|
||||
accepts=[FileHandlerAccept.from_json(i) for i in json['accepts']],
|
||||
display_name=str(json['displayName']),
|
||||
)
|
||||
|
||||
|
||||
class DisplayMode(enum.Enum):
|
||||
'''
|
||||
If user prefers opening the app in browser or an app window.
|
||||
'''
|
||||
STANDALONE = "standalone"
|
||||
BROWSER = "browser"
|
||||
|
||||
def to_json(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(json)
|
||||
|
||||
|
||||
def get_os_app_state(
|
||||
manifest_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[int, typing.List[FileHandler]]]:
|
||||
'''
|
||||
Returns the following OS state for the given manifest id.
|
||||
|
||||
:param manifest_id: The id from the webapp's manifest file, commonly it's the url of the site installing the webapp. See https://web.dev/learn/pwa/web-app-manifest.
|
||||
:returns: A tuple with the following items:
|
||||
|
||||
0. **badgeCount** -
|
||||
1. **fileHandlers** -
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.getOsAppState',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return (
|
||||
int(json['badgeCount']),
|
||||
[FileHandler.from_json(i) for i in json['fileHandlers']]
|
||||
)
|
||||
|
||||
|
||||
def install(
|
||||
manifest_id: str,
|
||||
install_url_or_bundle_url: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Installs the given manifest identity, optionally using the given install_url
|
||||
or IWA bundle location.
|
||||
|
||||
TODO(crbug.com/337872319) Support IWA to meet the following specific
|
||||
requirement.
|
||||
IWA-specific install description: If the manifest_id is isolated-app://,
|
||||
install_url_or_bundle_url is required, and can be either an http(s) URL or
|
||||
file:// URL pointing to a signed web bundle (.swbn). The .swbn file's
|
||||
signing key must correspond to manifest_id. If Chrome is not in IWA dev
|
||||
mode, the installation will fail, regardless of the state of the allowlist.
|
||||
|
||||
:param manifest_id:
|
||||
:param install_url_or_bundle_url: *(Optional)* The location of the app or bundle overriding the one derived from the manifestId.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
if install_url_or_bundle_url is not None:
|
||||
params['installUrlOrBundleUrl'] = install_url_or_bundle_url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.install',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def uninstall(
|
||||
manifest_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Uninstalls the given manifest_id and closes any opened app windows.
|
||||
|
||||
:param manifest_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.uninstall',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def launch(
|
||||
manifest_id: str,
|
||||
url: typing.Optional[str] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,target.TargetID]:
|
||||
'''
|
||||
Launches the installed web app, or an url in the same web app instead of the
|
||||
default start url if it is provided. Returns a page Target.TargetID which
|
||||
can be used to attach to via Target.attachToTarget or similar APIs.
|
||||
|
||||
:param manifest_id:
|
||||
:param url: *(Optional)*
|
||||
:returns: ID of the tab target created as a result.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
if url is not None:
|
||||
params['url'] = url
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.launch',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return target.TargetID.from_json(json['targetId'])
|
||||
|
||||
|
||||
def launch_files_in_app(
|
||||
manifest_id: str,
|
||||
files: typing.List[str]
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[target.TargetID]]:
|
||||
'''
|
||||
Opens one or more local files from an installed web app identified by its
|
||||
manifestId. The web app needs to have file handlers registered to process
|
||||
the files. The API returns one or more page Target.TargetIDs which can be
|
||||
used to attach to via Target.attachToTarget or similar APIs.
|
||||
If some files in the parameters cannot be handled by the web app, they will
|
||||
be ignored. If none of the files can be handled, this API returns an error.
|
||||
If no files are provided as the parameter, this API also returns an error.
|
||||
|
||||
According to the definition of the file handlers in the manifest file, one
|
||||
Target.TargetID may represent a page handling one or more files. The order
|
||||
of the returned Target.TargetIDs is not guaranteed.
|
||||
|
||||
TODO(crbug.com/339454034): Check the existences of the input files.
|
||||
|
||||
:param manifest_id:
|
||||
:param files:
|
||||
:returns: IDs of the tab targets created as the result.
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
params['files'] = [i for i in files]
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.launchFilesInApp',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [target.TargetID.from_json(i) for i in json['targetIds']]
|
||||
|
||||
|
||||
def open_current_page_in_app(
|
||||
manifest_id: str
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Opens the current page in its web app identified by the manifest id, needs
|
||||
to be called on a page target. This function returns immediately without
|
||||
waiting for the app to finish loading.
|
||||
|
||||
:param manifest_id:
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.openCurrentPageInApp',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
||||
|
||||
|
||||
def change_app_user_settings(
|
||||
manifest_id: str,
|
||||
link_capturing: typing.Optional[bool] = None,
|
||||
display_mode: typing.Optional[DisplayMode] = None
|
||||
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
|
||||
'''
|
||||
Changes user settings of the web app identified by its manifestId. If the
|
||||
app was not installed, this command returns an error. Unset parameters will
|
||||
be ignored; unrecognized values will cause an error.
|
||||
|
||||
Unlike the ones defined in the manifest files of the web apps, these
|
||||
settings are provided by the browser and controlled by the users, they
|
||||
impact the way the browser handling the web apps.
|
||||
|
||||
See the comment of each parameter.
|
||||
|
||||
:param manifest_id:
|
||||
:param link_capturing: *(Optional)* If user allows the links clicked on by the user in the app's scope, or extended scope if the manifest has scope extensions and the flags ```DesktopPWAsLinkCapturingWithScopeExtensions```` and ````WebAppEnableScopeExtensions``` are enabled. Note, the API does not support resetting the linkCapturing to the initial value, uninstalling and installing the web app again will reset it. TODO(crbug.com/339453269): Setting this value on ChromeOS is not supported yet.
|
||||
:param display_mode: *(Optional)*
|
||||
'''
|
||||
params: T_JSON_DICT = dict()
|
||||
params['manifestId'] = manifest_id
|
||||
if link_capturing is not None:
|
||||
params['linkCapturing'] = link_capturing
|
||||
if display_mode is not None:
|
||||
params['displayMode'] = display_mode.to_json()
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'PWA.changeAppUserSettings',
|
||||
'params': params,
|
||||
}
|
||||
json = yield cmd_dict
|
1583
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/runtime.py
Executable file
1583
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/runtime.py
Executable file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,48 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Schema
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
|
||||
class Domain:
|
||||
'''
|
||||
Description of the protocol domain.
|
||||
'''
|
||||
#: Domain name.
|
||||
name: str
|
||||
|
||||
#: Domain version.
|
||||
version: str
|
||||
|
||||
def to_json(self):
|
||||
json = dict()
|
||||
json['name'] = self.name
|
||||
json['version'] = self.version
|
||||
return json
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json):
|
||||
return cls(
|
||||
name=str(json['name']),
|
||||
version=str(json['version']),
|
||||
)
|
||||
|
||||
|
||||
def get_domains() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Domain]]:
|
||||
'''
|
||||
Returns supported domains.
|
||||
|
||||
:returns: List of supported domains.
|
||||
'''
|
||||
cmd_dict: T_JSON_DICT = {
|
||||
'method': 'Schema.getDomains',
|
||||
}
|
||||
json = yield cmd_dict
|
||||
return [Domain.from_json(i) for i in json['domains']]
|
507
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/security.py
Executable file
507
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/security.py
Executable file
@ -0,0 +1,507 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Security
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import network
|
||||
|
||||
|
||||
class CertificateId(int):
|
||||
'''
|
||||
An internal certificate ID value.
|
||||
'''
|
||||
def to_json(self) -> int:
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, json: int) -> CertificateId:
|
||||
return cls(json)
|
||||
|
||||
def __repr__(self):
|
||||
return 'CertificateId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class MixedContentType(enum.Enum):
    '''
    A description of mixed content (HTTP resources on HTTPS pages), as defined by
    https://www.w3.org/TR/mixed-content/#categories
    '''
    BLOCKABLE = "blockable"
    OPTIONALLY_BLOCKABLE = "optionally-blockable"
    NONE = "none"

    @classmethod
    def from_json(cls, json: str) -> MixedContentType:
        '''Look up the member matching the CDP string value.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the CDP wire string for this member.'''
        return self.value
|
||||
|
||||
|
||||
class SecurityState(enum.Enum):
    '''
    The security level of a page or resource.
    '''
    UNKNOWN = "unknown"
    NEUTRAL = "neutral"
    INSECURE = "insecure"
    SECURE = "secure"
    INFO = "info"
    INSECURE_BROKEN = "insecure-broken"

    @classmethod
    def from_json(cls, json: str) -> SecurityState:
        '''Look up the member matching the CDP string value.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the CDP wire string for this member.'''
        return self.value
|
||||
|
||||
|
||||
@dataclass
class CertificateSecurityState:
    '''
    Details about the security state of the page certificate.

    Mirrors the CDP ``Security.CertificateSecurityState`` type: attribute
    names are snake_case locally and camelCase on the wire.
    '''
    #: Protocol name (e.g. "TLS 1.2" or "QUIC").
    protocol: str

    #: Key Exchange used by the connection, or the empty string if not applicable.
    key_exchange: str

    #: Cipher name.
    cipher: str

    #: Page certificate.
    certificate: typing.List[str]

    #: Certificate subject name.
    subject_name: str

    #: Name of the issuing CA.
    issuer: str

    #: Certificate valid from date.
    valid_from: network.TimeSinceEpoch

    #: Certificate valid to (expiration) date
    valid_to: network.TimeSinceEpoch

    #: True if the certificate uses a weak signature algorithm.
    certificate_has_weak_signature: bool

    #: True if the certificate has a SHA1 signature in the chain.
    certificate_has_sha1_signature: bool

    #: True if modern SSL
    modern_ssl: bool

    #: True if the connection is using an obsolete SSL protocol.
    obsolete_ssl_protocol: bool

    #: True if the connection is using an obsolete SSL key exchange.
    obsolete_ssl_key_exchange: bool

    #: True if the connection is using an obsolete SSL cipher.
    obsolete_ssl_cipher: bool

    #: True if the connection is using an obsolete SSL signature.
    obsolete_ssl_signature: bool

    #: (EC)DH group used by the connection, if applicable.
    key_exchange_group: typing.Optional[str] = None

    #: TLS MAC. Note that AEAD ciphers do not have separate MACs.
    mac: typing.Optional[str] = None

    #: The highest priority network error code, if the certificate has an error.
    certificate_network_error: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when ``None``.'''
        json = dict()
        json['protocol'] = self.protocol
        json['keyExchange'] = self.key_exchange
        json['cipher'] = self.cipher
        json['certificate'] = [i for i in self.certificate]
        json['subjectName'] = self.subject_name
        json['issuer'] = self.issuer
        json['validFrom'] = self.valid_from.to_json()
        json['validTo'] = self.valid_to.to_json()
        json['certificateHasWeakSignature'] = self.certificate_has_weak_signature
        json['certificateHasSha1Signature'] = self.certificate_has_sha1_signature
        json['modernSSL'] = self.modern_ssl
        json['obsoleteSslProtocol'] = self.obsolete_ssl_protocol
        json['obsoleteSslKeyExchange'] = self.obsolete_ssl_key_exchange
        json['obsoleteSslCipher'] = self.obsolete_ssl_cipher
        json['obsoleteSslSignature'] = self.obsolete_ssl_signature
        if self.key_exchange_group is not None:
            json['keyExchangeGroup'] = self.key_exchange_group
        if self.mac is not None:
            json['mac'] = self.mac
        if self.certificate_network_error is not None:
            json['certificateNetworkError'] = self.certificate_network_error
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            protocol=str(json['protocol']),
            key_exchange=str(json['keyExchange']),
            cipher=str(json['cipher']),
            certificate=[str(i) for i in json['certificate']],
            subject_name=str(json['subjectName']),
            issuer=str(json['issuer']),
            valid_from=network.TimeSinceEpoch.from_json(json['validFrom']),
            valid_to=network.TimeSinceEpoch.from_json(json['validTo']),
            certificate_has_weak_signature=bool(json['certificateHasWeakSignature']),
            certificate_has_sha1_signature=bool(json['certificateHasSha1Signature']),
            modern_ssl=bool(json['modernSSL']),
            obsolete_ssl_protocol=bool(json['obsoleteSslProtocol']),
            obsolete_ssl_key_exchange=bool(json['obsoleteSslKeyExchange']),
            obsolete_ssl_cipher=bool(json['obsoleteSslCipher']),
            obsolete_ssl_signature=bool(json['obsoleteSslSignature']),
            key_exchange_group=str(json['keyExchangeGroup']) if 'keyExchangeGroup' in json else None,
            mac=str(json['mac']) if 'mac' in json else None,
            certificate_network_error=str(json['certificateNetworkError']) if 'certificateNetworkError' in json else None,
        )
|
||||
|
||||
|
||||
class SafetyTipStatus(enum.Enum):
    '''Reputation/safety-tip classification reported by the Security domain.'''
    BAD_REPUTATION = "badReputation"
    LOOKALIKE = "lookalike"

    @classmethod
    def from_json(cls, json: str) -> SafetyTipStatus:
        '''Look up the member matching the CDP string value.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the CDP wire string for this member.'''
        return self.value
|
||||
|
||||
|
||||
@dataclass
class SafetyTipInfo:
    '''Safety-tip information attached to the visible security state.'''

    #: Describes whether the page triggers any safety tips or reputation warnings. Default is unknown.
    safety_tip_status: SafetyTipStatus

    #: The URL the safety tip suggested ("Did you mean?"). Only filled in for lookalike matches.
    safe_url: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; ``safeUrl`` is omitted when ``None``.'''
        json = {'safetyTipStatus': self.safety_tip_status.to_json()}
        if self.safe_url is not None:
            json['safeUrl'] = self.safe_url
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict.'''
        return cls(
            safety_tip_status=SafetyTipStatus.from_json(json['safetyTipStatus']),
            safe_url=str(json['safeUrl']) if 'safeUrl' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class VisibleSecurityState:
    '''
    Security state information about the page.
    '''
    #: The security level of the page.
    security_state: SecurityState

    #: Array of security state issues ids.
    security_state_issue_ids: typing.List[str]

    #: Security state details about the page certificate.
    certificate_security_state: typing.Optional[CertificateSecurityState] = None

    #: The type of Safety Tip triggered on the page. Note that this field will be set even if the Safety Tip UI was not actually shown.
    safety_tip_info: typing.Optional[SafetyTipInfo] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when ``None``.'''
        json = dict()
        json['securityState'] = self.security_state.to_json()
        json['securityStateIssueIds'] = [i for i in self.security_state_issue_ids]
        if self.certificate_security_state is not None:
            json['certificateSecurityState'] = self.certificate_security_state.to_json()
        if self.safety_tip_info is not None:
            json['safetyTipInfo'] = self.safety_tip_info.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            security_state=SecurityState.from_json(json['securityState']),
            security_state_issue_ids=[str(i) for i in json['securityStateIssueIds']],
            certificate_security_state=CertificateSecurityState.from_json(json['certificateSecurityState']) if 'certificateSecurityState' in json else None,
            safety_tip_info=SafetyTipInfo.from_json(json['safetyTipInfo']) if 'safetyTipInfo' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class SecurityStateExplanation:
    '''
    An explanation of an factor contributing to the security state.
    '''
    #: Security state representing the severity of the factor being explained.
    security_state: SecurityState

    #: Title describing the type of factor.
    title: str

    #: Short phrase describing the type of factor.
    summary: str

    #: Full text explanation of the factor.
    description: str

    #: The type of mixed content described by the explanation.
    mixed_content_type: MixedContentType

    #: Page certificate.
    certificate: typing.List[str]

    #: Recommendations to fix any issues.
    recommendations: typing.Optional[typing.List[str]] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; ``recommendations`` is omitted when ``None``.'''
        json = dict()
        json['securityState'] = self.security_state.to_json()
        json['title'] = self.title
        json['summary'] = self.summary
        json['description'] = self.description
        json['mixedContentType'] = self.mixed_content_type.to_json()
        json['certificate'] = [i for i in self.certificate]
        if self.recommendations is not None:
            json['recommendations'] = [i for i in self.recommendations]
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict; an absent ``recommendations`` key becomes ``None``.'''
        return cls(
            security_state=SecurityState.from_json(json['securityState']),
            title=str(json['title']),
            summary=str(json['summary']),
            description=str(json['description']),
            mixed_content_type=MixedContentType.from_json(json['mixedContentType']),
            certificate=[str(i) for i in json['certificate']],
            recommendations=[str(i) for i in json['recommendations']] if 'recommendations' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class InsecureContentStatus:
    '''
    Information about insecure content on the page.

    NOTE(review): per the field comments below this type is vestigial — the
    browser always reports false / unknown for every field.
    '''
    #: Always false.
    ran_mixed_content: bool

    #: Always false.
    displayed_mixed_content: bool

    #: Always false.
    contained_mixed_form: bool

    #: Always false.
    ran_content_with_cert_errors: bool

    #: Always false.
    displayed_content_with_cert_errors: bool

    #: Always set to unknown.
    ran_insecure_content_style: SecurityState

    #: Always set to unknown.
    displayed_insecure_content_style: SecurityState

    def to_json(self):
        '''Serialize to a CDP JSON dict (camelCase keys).'''
        json = dict()
        json['ranMixedContent'] = self.ran_mixed_content
        json['displayedMixedContent'] = self.displayed_mixed_content
        json['containedMixedForm'] = self.contained_mixed_form
        json['ranContentWithCertErrors'] = self.ran_content_with_cert_errors
        json['displayedContentWithCertErrors'] = self.displayed_content_with_cert_errors
        json['ranInsecureContentStyle'] = self.ran_insecure_content_style.to_json()
        json['displayedInsecureContentStyle'] = self.displayed_insecure_content_style.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict.'''
        return cls(
            ran_mixed_content=bool(json['ranMixedContent']),
            displayed_mixed_content=bool(json['displayedMixedContent']),
            contained_mixed_form=bool(json['containedMixedForm']),
            ran_content_with_cert_errors=bool(json['ranContentWithCertErrors']),
            displayed_content_with_cert_errors=bool(json['displayedContentWithCertErrors']),
            ran_insecure_content_style=SecurityState.from_json(json['ranInsecureContentStyle']),
            displayed_insecure_content_style=SecurityState.from_json(json['displayedInsecureContentStyle']),
        )
|
||||
|
||||
|
||||
class CertificateErrorAction(enum.Enum):
    '''
    The action to take when a certificate error occurs. continue will continue processing the
    request and cancel will cancel the request.
    '''
    CONTINUE = "continue"
    CANCEL = "cancel"

    @classmethod
    def from_json(cls, json: str) -> CertificateErrorAction:
        '''Look up the member matching the CDP string value.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the CDP wire string for this member.'''
        return self.value
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables tracking security state changes.

    Sends the ``Security.disable`` CDP command; the response carries no data.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Security.disable',
    }
    # The response sent back into the generator is intentionally discarded
    # (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables tracking security state changes.

    Sends the ``Security.enable`` CDP command; the response carries no data.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'Security.enable',
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def set_ignore_certificate_errors(
        ignore: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enable/disable whether all certificate errors should be ignored.

    :param ignore: If true, all certificate errors will be ignored.
    '''
    params: T_JSON_DICT = dict()
    params['ignore'] = ignore
    cmd_dict: T_JSON_DICT = {
        'method': 'Security.setIgnoreCertificateErrors',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def handle_certificate_error(
        event_id: int,
        action: CertificateErrorAction
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Handles a certificate error that fired a certificateError event.

    :param event_id: The ID of the event.
    :param action: The action to take on the certificate error.
    '''
    params: T_JSON_DICT = dict()
    params['eventId'] = event_id
    params['action'] = action.to_json()
    cmd_dict: T_JSON_DICT = {
        'method': 'Security.handleCertificateError',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def set_override_certificate_errors(
        override: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enable/disable overriding certificate errors. If enabled, all certificate error events need to
    be handled by the DevTools client and should be answered with ``handleCertificateError`` commands.

    :param override: If true, certificate errors will be overridden.
    '''
    params: T_JSON_DICT = dict()
    params['override'] = override
    cmd_dict: T_JSON_DICT = {
        'method': 'Security.setOverrideCertificateErrors',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Security.certificateError')
@dataclass
class CertificateError:
    '''
    There is a certificate error. If overriding certificate errors is enabled, then it should be
    handled with the ``handleCertificateError`` command. Note: this event does not fire if the
    certificate error has been allowed internally. Only one client per target should override
    certificate errors at the same time.
    '''
    #: The ID of the event.
    event_id: int
    #: The type of the error.
    error_type: str
    #: The url that was requested.
    request_url: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> CertificateError:
        '''Build the event from its CDP JSON payload.'''
        return cls(
            event_id=int(json['eventId']),
            error_type=str(json['errorType']),
            request_url=str(json['requestURL'])
        )
|
||||
|
||||
|
||||
@event_class('Security.visibleSecurityStateChanged')
@dataclass
class VisibleSecurityStateChanged:
    '''
    **EXPERIMENTAL**

    The security state of the page changed.
    '''
    #: Security state information about the page.
    visible_security_state: VisibleSecurityState

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> VisibleSecurityStateChanged:
        '''Build the event from its CDP JSON payload.'''
        return cls(
            visible_security_state=VisibleSecurityState.from_json(json['visibleSecurityState'])
        )
|
||||
|
||||
|
||||
@event_class('Security.securityStateChanged')
@dataclass
class SecurityStateChanged:
    '''
    The security state of the page changed. No longer being sent.
    '''
    #: Security state.
    security_state: SecurityState
    #: True if the page was loaded over cryptographic transport such as HTTPS.
    scheme_is_cryptographic: bool
    #: Previously a list of explanations for the security state. Now always
    #: empty.
    explanations: typing.List[SecurityStateExplanation]
    #: Information about insecure content on the page.
    insecure_content_status: InsecureContentStatus
    #: Overrides user-visible description of the state. Always omitted.
    summary: typing.Optional[str]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> SecurityStateChanged:
        '''Build the event from its CDP JSON payload; a missing ``summary`` becomes ``None``.'''
        return cls(
            security_state=SecurityState.from_json(json['securityState']),
            scheme_is_cryptographic=bool(json['schemeIsCryptographic']),
            explanations=[SecurityStateExplanation.from_json(i) for i in json['explanations']],
            insecure_content_status=InsecureContentStatus.from_json(json['insecureContentStatus']),
            summary=str(json['summary']) if 'summary' in json else None
        )
|
@ -0,0 +1,414 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: ServiceWorker (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import target
|
||||
|
||||
|
||||
class RegistrationID(str):
    '''Opaque string identifier for a service worker registration.'''

    @classmethod
    def from_json(cls, json: str) -> RegistrationID:
        '''Wrap the raw CDP string in a :class:`RegistrationID`.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the raw string form used on the wire.'''
        return self

    def __repr__(self):
        return f'RegistrationID({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class ServiceWorkerRegistration:
    '''
    ServiceWorker registration.
    '''
    #: Identifier of this registration.
    registration_id: RegistrationID

    #: Scope URL of the registration.
    scope_url: str

    #: Whether the registration has been deleted.
    is_deleted: bool

    def to_json(self):
        '''Serialize to a CDP JSON dict (camelCase keys).'''
        json = dict()
        json['registrationId'] = self.registration_id.to_json()
        json['scopeURL'] = self.scope_url
        json['isDeleted'] = self.is_deleted
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict.'''
        return cls(
            registration_id=RegistrationID.from_json(json['registrationId']),
            scope_url=str(json['scopeURL']),
            is_deleted=bool(json['isDeleted']),
        )
|
||||
|
||||
|
||||
class ServiceWorkerVersionRunningStatus(enum.Enum):
    '''Running state of a service worker version.'''
    STOPPED = "stopped"
    STARTING = "starting"
    RUNNING = "running"
    STOPPING = "stopping"

    @classmethod
    def from_json(cls, json: str) -> ServiceWorkerVersionRunningStatus:
        '''Look up the member matching the CDP string value.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the CDP wire string for this member.'''
        return self.value
|
||||
|
||||
|
||||
class ServiceWorkerVersionStatus(enum.Enum):
    '''Lifecycle status of a service worker version.'''
    NEW = "new"
    INSTALLING = "installing"
    INSTALLED = "installed"
    ACTIVATING = "activating"
    ACTIVATED = "activated"
    REDUNDANT = "redundant"

    @classmethod
    def from_json(cls, json: str) -> ServiceWorkerVersionStatus:
        '''Look up the member matching the CDP string value.'''
        return cls(json)

    def to_json(self) -> str:
        '''Return the CDP wire string for this member.'''
        return self.value
|
||||
|
||||
|
||||
@dataclass
class ServiceWorkerVersion:
    '''
    ServiceWorker version.
    '''
    #: Identifier of this version.
    version_id: str

    #: Registration this version belongs to.
    registration_id: RegistrationID

    #: URL of the worker script.
    script_url: str

    #: Current running state of the version.
    running_status: ServiceWorkerVersionRunningStatus

    #: Current lifecycle status of the version.
    status: ServiceWorkerVersionStatus

    #: The Last-Modified header value of the main script.
    script_last_modified: typing.Optional[float] = None

    #: The time at which the response headers of the main script were received from the server.
    #: For cached script it is the last time the cache entry was validated.
    script_response_time: typing.Optional[float] = None

    #: Ids of the targets (clients) currently controlled by this version, if any.
    controlled_clients: typing.Optional[typing.List[target.TargetID]] = None

    #: Target id of the worker itself, when available.
    target_id: typing.Optional[target.TargetID] = None

    #: Router rules associated with this version, if any.
    #: NOTE(review): appears to be a pre-serialized string, not structured data — confirm.
    router_rules: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when ``None``.'''
        json = dict()
        json['versionId'] = self.version_id
        json['registrationId'] = self.registration_id.to_json()
        json['scriptURL'] = self.script_url
        json['runningStatus'] = self.running_status.to_json()
        json['status'] = self.status.to_json()
        if self.script_last_modified is not None:
            json['scriptLastModified'] = self.script_last_modified
        if self.script_response_time is not None:
            json['scriptResponseTime'] = self.script_response_time
        if self.controlled_clients is not None:
            json['controlledClients'] = [i.to_json() for i in self.controlled_clients]
        if self.target_id is not None:
            json['targetId'] = self.target_id.to_json()
        if self.router_rules is not None:
            json['routerRules'] = self.router_rules
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            version_id=str(json['versionId']),
            registration_id=RegistrationID.from_json(json['registrationId']),
            script_url=str(json['scriptURL']),
            running_status=ServiceWorkerVersionRunningStatus.from_json(json['runningStatus']),
            status=ServiceWorkerVersionStatus.from_json(json['status']),
            script_last_modified=float(json['scriptLastModified']) if 'scriptLastModified' in json else None,
            script_response_time=float(json['scriptResponseTime']) if 'scriptResponseTime' in json else None,
            controlled_clients=[target.TargetID.from_json(i) for i in json['controlledClients']] if 'controlledClients' in json else None,
            target_id=target.TargetID.from_json(json['targetId']) if 'targetId' in json else None,
            router_rules=str(json['routerRules']) if 'routerRules' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class ServiceWorkerErrorMessage:
    '''
    ServiceWorker error message.
    '''
    #: The error message text.
    error_message: str

    #: Registration the error belongs to.
    registration_id: RegistrationID

    #: Version the error belongs to.
    version_id: str

    #: URL of the script that produced the error.
    source_url: str

    #: Line number within the source.
    line_number: int

    #: Column number within the source.
    column_number: int

    def to_json(self):
        '''Serialize to a CDP JSON dict (camelCase keys).'''
        json = dict()
        json['errorMessage'] = self.error_message
        json['registrationId'] = self.registration_id.to_json()
        json['versionId'] = self.version_id
        json['sourceURL'] = self.source_url
        json['lineNumber'] = self.line_number
        json['columnNumber'] = self.column_number
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict.'''
        return cls(
            error_message=str(json['errorMessage']),
            registration_id=RegistrationID.from_json(json['registrationId']),
            version_id=str(json['versionId']),
            source_url=str(json['sourceURL']),
            line_number=int(json['lineNumber']),
            column_number=int(json['columnNumber']),
        )
|
||||
|
||||
|
||||
def deliver_push_message(
        origin: str,
        registration_id: RegistrationID,
        data: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.deliverPushMessage`` CDP command.

    :param origin: Origin of the target registration.
    :param registration_id: Registration to deliver the message to.
    :param data: Push message payload.
    '''
    params: T_JSON_DICT = dict()
    params['origin'] = origin
    params['registrationId'] = registration_id.to_json()
    params['data'] = data
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.deliverPushMessage',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.disable`` CDP command; the response carries no data.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.disable',
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def dispatch_sync_event(
        origin: str,
        registration_id: RegistrationID,
        tag: str,
        last_chance: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.dispatchSyncEvent`` CDP command.

    :param origin: Origin of the target registration.
    :param registration_id: Registration to dispatch the event to.
    :param tag: Sync event tag.
    :param last_chance: Whether this is the last retry attempt.
    '''
    params: T_JSON_DICT = dict()
    params['origin'] = origin
    params['registrationId'] = registration_id.to_json()
    params['tag'] = tag
    params['lastChance'] = last_chance
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.dispatchSyncEvent',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def dispatch_periodic_sync_event(
        origin: str,
        registration_id: RegistrationID,
        tag: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.dispatchPeriodicSyncEvent`` CDP command.

    :param origin: Origin of the target registration.
    :param registration_id: Registration to dispatch the event to.
    :param tag: Periodic sync event tag.
    '''
    params: T_JSON_DICT = dict()
    params['origin'] = origin
    params['registrationId'] = registration_id.to_json()
    params['tag'] = tag
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.dispatchPeriodicSyncEvent',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.enable`` CDP command; the response carries no data.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.enable',
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def inspect_worker(
        version_id: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.inspectWorker`` CDP command.

    :param version_id: Id of the worker version to inspect.
    '''
    params: T_JSON_DICT = dict()
    params['versionId'] = version_id
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.inspectWorker',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def set_force_update_on_page_load(
        force_update_on_page_load: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.setForceUpdateOnPageLoad`` CDP command.

    :param force_update_on_page_load: Whether to force-update workers on page load.
    '''
    params: T_JSON_DICT = dict()
    params['forceUpdateOnPageLoad'] = force_update_on_page_load
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.setForceUpdateOnPageLoad',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def skip_waiting(
        scope_url: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.skipWaiting`` CDP command.

    :param scope_url: Scope URL of the target registration.
    '''
    params: T_JSON_DICT = dict()
    params['scopeURL'] = scope_url
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.skipWaiting',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def start_worker(
        scope_url: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.startWorker`` CDP command.

    :param scope_url: Scope URL of the target registration.
    '''
    params: T_JSON_DICT = dict()
    params['scopeURL'] = scope_url
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.startWorker',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def stop_all_workers() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.stopAllWorkers`` CDP command; the response carries no data.
    '''
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.stopAllWorkers',
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def stop_worker(
        version_id: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.stopWorker`` CDP command.

    :param version_id: Id of the worker version to stop.
    '''
    params: T_JSON_DICT = dict()
    params['versionId'] = version_id
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.stopWorker',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def unregister(
        scope_url: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.unregister`` CDP command.

    :param scope_url: Scope URL of the registration to unregister.
    '''
    params: T_JSON_DICT = dict()
    params['scopeURL'] = scope_url
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.unregister',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
def update_registration(
        scope_url: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends the ``ServiceWorker.updateRegistration`` CDP command.

    :param scope_url: Scope URL of the registration to update.
    '''
    params: T_JSON_DICT = dict()
    params['scopeURL'] = scope_url
    cmd_dict: T_JSON_DICT = {
        'method': 'ServiceWorker.updateRegistration',
        'params': params,
    }
    # Response intentionally discarded (previously bound to an unused local).
    yield cmd_dict
|
||||
|
||||
|
||||
@event_class('ServiceWorker.workerErrorReported')
@dataclass
class WorkerErrorReported:
    '''Event: a service worker reported an error.'''
    #: Details of the reported error.
    error_message: ServiceWorkerErrorMessage

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> WorkerErrorReported:
        '''Build the event from its CDP JSON payload.'''
        return cls(
            error_message=ServiceWorkerErrorMessage.from_json(json['errorMessage'])
        )
|
||||
|
||||
|
||||
@event_class('ServiceWorker.workerRegistrationUpdated')
@dataclass
class WorkerRegistrationUpdated:
    '''Event: the set of service worker registrations changed.'''
    #: Updated registrations.
    registrations: typing.List[ServiceWorkerRegistration]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> WorkerRegistrationUpdated:
        '''Build the event from its CDP JSON payload.'''
        return cls(
            registrations=[ServiceWorkerRegistration.from_json(i) for i in json['registrations']]
        )
|
||||
|
||||
|
||||
@event_class('ServiceWorker.workerVersionUpdated')
@dataclass
class WorkerVersionUpdated:
    '''Event: service worker version information changed.'''
    #: Updated versions.
    versions: typing.List[ServiceWorkerVersion]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> WorkerVersionUpdated:
        '''Build the event from its CDP JSON payload.'''
        return cls(
            versions=[ServiceWorkerVersion.from_json(i) for i in json['versions']]
        )
|
2090
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/storage.py
Executable file
2090
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/storage.py
Executable file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,366 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: SystemInfo (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
@dataclass
class GPUDevice:
    '''
    Describes a single graphics processor (GPU).
    '''
    #: PCI ID of the GPU vendor, if available; 0 otherwise.
    vendor_id: float

    #: PCI ID of the GPU device, if available; 0 otherwise.
    device_id: float

    #: String description of the GPU vendor, if the PCI ID is not available.
    vendor_string: str

    #: String description of the GPU device, if the PCI ID is not available.
    device_string: str

    #: String description of the GPU driver vendor.
    driver_vendor: str

    #: String description of the GPU driver version.
    driver_version: str

    #: Sub sys ID of the GPU, only available on Windows.
    sub_sys_id: typing.Optional[float] = None

    #: Revision of the GPU, only available on Windows.
    revision: typing.Optional[float] = None

    def to_json(self):
        '''Serialize to a CDP JSON dict; optional fields are omitted when ``None``.'''
        json = {
            'vendorId': self.vendor_id,
            'deviceId': self.device_id,
            'vendorString': self.vendor_string,
            'deviceString': self.device_string,
            'driverVendor': self.driver_vendor,
            'driverVersion': self.driver_version,
        }
        if self.sub_sys_id is not None:
            json['subSysId'] = self.sub_sys_id
        if self.revision is not None:
            json['revision'] = self.revision
        return json

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict; absent optional keys become ``None``.'''
        return cls(
            vendor_id=float(json['vendorId']),
            device_id=float(json['deviceId']),
            vendor_string=str(json['vendorString']),
            device_string=str(json['deviceString']),
            driver_vendor=str(json['driverVendor']),
            driver_version=str(json['driverVersion']),
            sub_sys_id=float(json['subSysId']) if 'subSysId' in json else None,
            revision=float(json['revision']) if 'revision' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class Size:
    '''
    Describes the width and height dimensions of an entity.
    '''
    #: Width in pixels.
    width: int

    #: Height in pixels.
    height: int

    def to_json(self):
        '''Serialize to a CDP JSON dict.'''
        return {'width': self.width, 'height': self.height}

    @classmethod
    def from_json(cls, json):
        '''Build an instance from a CDP JSON dict.'''
        return cls(width=int(json['width']), height=int(json['height']))
|
||||
|
||||
|
||||
@dataclass
class VideoDecodeAcceleratorCapability:
    '''
    Describes a supported video decoding profile with its associated minimum and
    maximum resolutions.
    '''
    #: Video codec profile that is supported, e.g. VP9 Profile 2.
    profile: str

    #: Maximum video dimensions in pixels supported for this ``profile``.
    max_resolution: Size

    #: Minimum video dimensions in pixels supported for this ``profile``.
    min_resolution: Size

    def to_json(self):
        '''Serialize to the CDP wire format; resolutions nest as Size dicts.'''
        return {
            'profile': self.profile,
            'maxResolution': self.max_resolution.to_json(),
            'minResolution': self.min_resolution.to_json(),
        }

    @classmethod
    def from_json(cls, json):
        '''Build a capability entry from a CDP response dict.'''
        return cls(
            profile=str(json['profile']),
            max_resolution=Size.from_json(json['maxResolution']),
            min_resolution=Size.from_json(json['minResolution']),
        )
|
||||
|
||||
|
||||
@dataclass
class VideoEncodeAcceleratorCapability:
    '''
    Describes a supported video encoding profile with its associated maximum
    resolution and maximum framerate.
    '''
    #: Video codec profile that is supported, e.g H264 Main.
    profile: str

    #: Maximum video dimensions in pixels supported for this ``profile``.
    max_resolution: Size

    #: Maximum encoding framerate in frames per second supported for this
    #: ``profile``, as fraction's numerator and denominator, e.g. 24/1 fps,
    #: 24000/1001 fps, etc.
    max_framerate_numerator: int

    max_framerate_denominator: int

    def to_json(self):
        '''Serialize to the CDP wire format; the framerate stays a fraction.'''
        return {
            'profile': self.profile,
            'maxResolution': self.max_resolution.to_json(),
            'maxFramerateNumerator': self.max_framerate_numerator,
            'maxFramerateDenominator': self.max_framerate_denominator,
        }

    @classmethod
    def from_json(cls, json):
        '''Build a capability entry from a CDP response dict.'''
        return cls(
            profile=str(json['profile']),
            max_resolution=Size.from_json(json['maxResolution']),
            max_framerate_numerator=int(json['maxFramerateNumerator']),
            max_framerate_denominator=int(json['maxFramerateDenominator']),
        )
|
||||
|
||||
|
||||
class SubsamplingFormat(enum.Enum):
    '''
    YUV subsampling type of the pixels of a given image.
    '''
    YUV420 = "yuv420"
    YUV422 = "yuv422"
    YUV444 = "yuv444"

    def to_json(self):
        '''Serialize as the raw protocol string.'''
        return self.value

    @classmethod
    def from_json(cls, json):
        '''Look up the enum member matching a protocol string.'''
        return cls(json)
|
||||
|
||||
|
||||
class ImageType(enum.Enum):
    '''
    Image format of a given image.
    '''
    JPEG = "jpeg"
    WEBP = "webp"
    UNKNOWN = "unknown"

    def to_json(self):
        '''Serialize as the raw protocol string.'''
        return self.value

    @classmethod
    def from_json(cls, json):
        '''Look up the enum member matching a protocol string.'''
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class ImageDecodeAcceleratorCapability:
    '''
    Describes a supported image decoding profile with its associated minimum and
    maximum resolutions and subsampling.
    '''
    #: Image coded, e.g. Jpeg.
    image_type: ImageType

    #: Maximum supported dimensions of the image in pixels.
    max_dimensions: Size

    #: Minimum supported dimensions of the image in pixels.
    min_dimensions: Size

    #: Optional array of supported subsampling formats, e.g. 4:2:0, if known.
    subsamplings: typing.List[SubsamplingFormat]

    def to_json(self):
        '''Serialize to the CDP wire format; nested types serialize themselves.'''
        return {
            'imageType': self.image_type.to_json(),
            'maxDimensions': self.max_dimensions.to_json(),
            'minDimensions': self.min_dimensions.to_json(),
            'subsamplings': [fmt.to_json() for fmt in self.subsamplings],
        }

    @classmethod
    def from_json(cls, json):
        '''Build a capability entry from a CDP response dict.'''
        return cls(
            image_type=ImageType.from_json(json['imageType']),
            max_dimensions=Size.from_json(json['maxDimensions']),
            min_dimensions=Size.from_json(json['minDimensions']),
            subsamplings=[SubsamplingFormat.from_json(fmt) for fmt in json['subsamplings']],
        )
|
||||
|
||||
|
||||
@dataclass
class GPUInfo:
    '''
    Provides information about the GPU(s) on the system.
    '''
    #: The graphics devices on the system. Element 0 is the primary GPU.
    devices: typing.List[GPUDevice]

    #: An optional array of GPU driver bug workarounds.
    driver_bug_workarounds: typing.List[str]

    #: Supported accelerated video decoding capabilities.
    video_decoding: typing.List[VideoDecodeAcceleratorCapability]

    #: Supported accelerated video encoding capabilities.
    video_encoding: typing.List[VideoEncodeAcceleratorCapability]

    #: Supported accelerated image decoding capabilities.
    image_decoding: typing.List[ImageDecodeAcceleratorCapability]

    #: An optional dictionary of additional GPU related attributes.
    aux_attributes: typing.Optional[dict] = None

    #: An optional dictionary of graphics features and their status.
    feature_status: typing.Optional[dict] = None

    def to_json(self):
        '''Serialize to the CDP wire format; optional dicts are emitted only when set.'''
        result = {
            'devices': [dev.to_json() for dev in self.devices],
            # Plain strings: copy the list without further conversion.
            'driverBugWorkarounds': list(self.driver_bug_workarounds),
            'videoDecoding': [cap.to_json() for cap in self.video_decoding],
            'videoEncoding': [cap.to_json() for cap in self.video_encoding],
            'imageDecoding': [cap.to_json() for cap in self.image_decoding],
        }
        if self.aux_attributes is not None:
            result['auxAttributes'] = self.aux_attributes
        if self.feature_status is not None:
            result['featureStatus'] = self.feature_status
        return result

    @classmethod
    def from_json(cls, json):
        '''Build a GPUInfo from a CDP response dict.'''
        return cls(
            devices=[GPUDevice.from_json(dev) for dev in json['devices']],
            driver_bug_workarounds=[str(w) for w in json['driverBugWorkarounds']],
            video_decoding=[VideoDecodeAcceleratorCapability.from_json(cap) for cap in json['videoDecoding']],
            video_encoding=[VideoEncodeAcceleratorCapability.from_json(cap) for cap in json['videoEncoding']],
            image_decoding=[ImageDecodeAcceleratorCapability.from_json(cap) for cap in json['imageDecoding']],
            aux_attributes=dict(json['auxAttributes']) if 'auxAttributes' in json else None,
            feature_status=dict(json['featureStatus']) if 'featureStatus' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class ProcessInfo:
    '''
    Represents process info.
    '''
    #: Specifies process type.
    type_: str

    #: Specifies process id.
    id_: int

    #: Specifies cumulative CPU usage in seconds across all threads of the
    #: process since the process start.
    cpu_time: float

    def to_json(self):
        '''Serialize to the CDP wire format (note: ``type_``/``id_`` map to
        the reserved-word keys ``type``/``id``).'''
        return {
            'type': self.type_,
            'id': self.id_,
            'cpuTime': self.cpu_time,
        }

    @classmethod
    def from_json(cls, json):
        '''Build a ProcessInfo from a CDP response dict.'''
        return cls(
            type_=str(json['type']),
            id_=int(json['id']),
            cpu_time=float(json['cpuTime']),
        )
|
||||
|
||||
|
||||
def get_info() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[GPUInfo, str, str, str]]:
    '''
    Returns information about the system.

    :returns: A tuple with the following items:

        0. **gpu** - Information about the GPUs on the system.
        1. **modelName** - A platform-dependent description of the model of the machine. On Mac OS, this is, for example, 'MacBookPro'. Will be the empty string if not supported.
        2. **modelVersion** - A platform-dependent description of the version of the machine. On Mac OS, this is, for example, '10.1'. Will be the empty string if not supported.
        3. **commandLine** - The command line string used to launch the browser. Will be the empty string if not supported.
    '''
    # Yield the command, then unpack the response sent back by the session.
    response = yield {
        'method': 'SystemInfo.getInfo',
    }
    return (
        GPUInfo.from_json(response['gpu']),
        str(response['modelName']),
        str(response['modelVersion']),
        str(response['commandLine']),
    )
|
||||
|
||||
|
||||
def get_feature_state(
        feature_state: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,bool]:
    '''
    Returns information about the feature state.

    :param feature_state:
    :returns:
    '''
    # Yield the command, then coerce the response flag to a plain bool.
    response = yield {
        'method': 'SystemInfo.getFeatureState',
        'params': {'featureState': feature_state},
    }
    return bool(response['featureEnabled'])
|
||||
|
||||
|
||||
def get_process_info() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ProcessInfo]]:
    '''
    Returns information about all running processes.

    :returns: An array of process info blocks.
    '''
    # Yield the command, then decode each process entry from the response.
    response = yield {
        'method': 'SystemInfo.getProcessInfo',
    }
    return [ProcessInfo.from_json(entry) for entry in response['processInfo']]
|
713
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/target.py
Executable file
713
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/target.py
Executable file
@ -0,0 +1,713 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Target
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import browser
|
||||
from . import page
|
||||
|
||||
|
||||
class TargetID(str):
    '''A string-typed identifier for a debuggable target.'''
    def to_json(self) -> str:
        # Wire format is the plain string itself.
        return self

    @classmethod
    def from_json(cls, json: str) -> TargetID:
        return cls(json)

    def __repr__(self):
        return 'TargetID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class SessionID(str):
    '''
    Unique identifier of attached debugging session.
    '''
    def to_json(self) -> str:
        # Wire format is the plain string itself.
        return self

    @classmethod
    def from_json(cls, json: str) -> SessionID:
        return cls(json)

    def __repr__(self):
        return 'SessionID({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
class TargetInfo:
    '''Metadata describing a single debuggable target.'''
    target_id: TargetID

    #: List of types: https://source.chromium.org/chromium/chromium/src/+/main:content/browser/devtools/devtools_agent_host_impl.cc?ss=chromium&q=f:devtools%20-f:out%20%22::kTypeTab%5B%5D%22
    type_: str

    title: str

    url: str

    #: Whether the target has an attached client.
    attached: bool

    #: Whether the target has access to the originating window.
    can_access_opener: bool

    #: Opener target Id
    opener_id: typing.Optional[TargetID] = None

    #: Frame id of originating window (is only set if target has an opener).
    opener_frame_id: typing.Optional[page.FrameId] = None

    browser_context_id: typing.Optional[browser.BrowserContextID] = None

    #: Provides additional details for specific target types. For example, for
    #: the type of "page", this may be set to "prerender".
    subtype: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to the CDP wire format; optional fields only when set.'''
        result = {
            'targetId': self.target_id.to_json(),
            'type': self.type_,
            'title': self.title,
            'url': self.url,
            'attached': self.attached,
            'canAccessOpener': self.can_access_opener,
        }
        if self.opener_id is not None:
            result['openerId'] = self.opener_id.to_json()
        if self.opener_frame_id is not None:
            result['openerFrameId'] = self.opener_frame_id.to_json()
        if self.browser_context_id is not None:
            result['browserContextId'] = self.browser_context_id.to_json()
        if self.subtype is not None:
            result['subtype'] = self.subtype
        return result

    @classmethod
    def from_json(cls, json):
        '''Build a TargetInfo from a CDP response dict.'''
        return cls(
            target_id=TargetID.from_json(json['targetId']),
            type_=str(json['type']),
            title=str(json['title']),
            url=str(json['url']),
            attached=bool(json['attached']),
            can_access_opener=bool(json['canAccessOpener']),
            opener_id=TargetID.from_json(json['openerId']) if 'openerId' in json else None,
            opener_frame_id=page.FrameId.from_json(json['openerFrameId']) if 'openerFrameId' in json else None,
            browser_context_id=browser.BrowserContextID.from_json(json['browserContextId']) if 'browserContextId' in json else None,
            subtype=str(json['subtype']) if 'subtype' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class FilterEntry:
    '''
    A filter used by target query/discovery/auto-attach operations.
    '''
    #: If set, causes exclusion of matching targets from the list.
    exclude: typing.Optional[bool] = None

    #: If not present, matches any type.
    type_: typing.Optional[str] = None

    def to_json(self):
        '''Serialize to the CDP wire format; both fields are optional.'''
        result = {}
        if self.exclude is not None:
            result['exclude'] = self.exclude
        if self.type_ is not None:
            result['type'] = self.type_
        return result

    @classmethod
    def from_json(cls, json):
        '''Build a FilterEntry from a CDP dict; absent keys become None.'''
        return cls(
            exclude=bool(json['exclude']) if 'exclude' in json else None,
            type_=str(json['type']) if 'type' in json else None,
        )
|
||||
|
||||
|
||||
class TargetFilter(list):
    '''
    The entries in TargetFilter are matched sequentially against targets and
    the first entry that matches determines if the target is included or not,
    depending on the value of ``exclude`` field in the entry.
    If filter is not specified, the one assumed is
    [{type: "browser", exclude: true}, {type: "tab", exclude: true}, {}]
    (i.e. include everything but ``browser`` and ``tab``).
    '''
    def to_json(self) -> typing.List[FilterEntry]:
        # Wire format is the list itself.
        return self

    @classmethod
    def from_json(cls, json: typing.List[FilterEntry]) -> TargetFilter:
        return cls(json)

    def __repr__(self):
        return 'TargetFilter({})'.format(super().__repr__())
|
||||
|
||||
|
||||
@dataclass
class RemoteLocation:
    '''A host/port pair identifying a remote debugging endpoint.'''
    host: str

    port: int

    def to_json(self):
        '''Serialize to the CDP wire format.'''
        return {'host': self.host, 'port': self.port}

    @classmethod
    def from_json(cls, json):
        '''Build a RemoteLocation from a CDP dict.'''
        return cls(host=str(json['host']), port=int(json['port']))
|
||||
|
||||
|
||||
def activate_target(
        target_id: TargetID
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Activates (focuses) the target.

    :param target_id:
    '''
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.activateTarget',
        'params': {'targetId': target_id.to_json()},
    }
|
||||
|
||||
|
||||
def attach_to_target(
        target_id: TargetID,
        flatten: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SessionID]:
    '''
    Attaches to the target with given id.

    :param target_id:
    :param flatten: *(Optional)* Enables "flat" access to the session via specifying sessionId attribute in the commands. We plan to make this the default, deprecate non-flattened mode, and eventually retire it. See crbug.com/991325.
    :returns: Id assigned to the session.
    '''
    params: T_JSON_DICT = {'targetId': target_id.to_json()}
    if flatten is not None:
        params['flatten'] = flatten
    response = yield {
        'method': 'Target.attachToTarget',
        'params': params,
    }
    return SessionID.from_json(response['sessionId'])
|
||||
|
||||
|
||||
def attach_to_browser_target() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,SessionID]:
    '''
    Attaches to the browser target, only uses flat sessionId mode.

    **EXPERIMENTAL**

    :returns: Id assigned to the session.
    '''
    # No parameters for this command.
    response = yield {
        'method': 'Target.attachToBrowserTarget',
    }
    return SessionID.from_json(response['sessionId'])
|
||||
|
||||
|
||||
def close_target(
        target_id: TargetID
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,bool]:
    '''
    Closes the target. If the target is a page that gets closed too.

    :param target_id:
    :returns: Always set to true. If an error occurs, the response indicates protocol error.
    '''
    response = yield {
        'method': 'Target.closeTarget',
        'params': {'targetId': target_id.to_json()},
    }
    return bool(response['success'])
|
||||
|
||||
|
||||
def expose_dev_tools_protocol(
        target_id: TargetID,
        binding_name: typing.Optional[str] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Inject object to the target's main frame that provides a communication
    channel with browser target.

    Injected object will be available as ``window[bindingName]``.

    The object has the following API:
    - ``binding.send(json)`` - a method to send messages over the remote debugging protocol
    - ``binding.onmessage = json => handleMessage(json)`` - a callback that will be called for the protocol notifications and command responses.

    **EXPERIMENTAL**

    :param target_id:
    :param binding_name: *(Optional)* Binding name, 'cdp' if not specified.
    '''
    params: T_JSON_DICT = {'targetId': target_id.to_json()}
    if binding_name is not None:
        params['bindingName'] = binding_name
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.exposeDevToolsProtocol',
        'params': params,
    }
|
||||
|
||||
|
||||
def create_browser_context(
        dispose_on_detach: typing.Optional[bool] = None,
        proxy_server: typing.Optional[str] = None,
        proxy_bypass_list: typing.Optional[str] = None,
        origins_with_universal_network_access: typing.Optional[typing.List[str]] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,browser.BrowserContextID]:
    '''
    Creates a new empty BrowserContext. Similar to an incognito profile but you can have more than
    one.

    :param dispose_on_detach: **(EXPERIMENTAL)** *(Optional)* If specified, disposes this context when debugging session disconnects.
    :param proxy_server: **(EXPERIMENTAL)** *(Optional)* Proxy server, similar to the one passed to --proxy-server
    :param proxy_bypass_list: **(EXPERIMENTAL)** *(Optional)* Proxy bypass list, similar to the one passed to --proxy-bypass-list
    :param origins_with_universal_network_access: **(EXPERIMENTAL)** *(Optional)* An optional list of origins to grant unlimited cross-origin access to. Parts of the URL other than those constituting origin are ignored.
    :returns: The id of the context created.
    '''
    # Only include parameters the caller actually supplied.
    params: T_JSON_DICT = {}
    if dispose_on_detach is not None:
        params['disposeOnDetach'] = dispose_on_detach
    if proxy_server is not None:
        params['proxyServer'] = proxy_server
    if proxy_bypass_list is not None:
        params['proxyBypassList'] = proxy_bypass_list
    if origins_with_universal_network_access is not None:
        params['originsWithUniversalNetworkAccess'] = list(origins_with_universal_network_access)
    response = yield {
        'method': 'Target.createBrowserContext',
        'params': params,
    }
    return browser.BrowserContextID.from_json(response['browserContextId'])
|
||||
|
||||
|
||||
def get_browser_contexts() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[browser.BrowserContextID]]:
    '''
    Returns all browser contexts created with ``Target.createBrowserContext`` method.

    :returns: An array of browser context ids.
    '''
    # No parameters for this command.
    response = yield {
        'method': 'Target.getBrowserContexts',
    }
    return [browser.BrowserContextID.from_json(ctx) for ctx in response['browserContextIds']]
|
||||
|
||||
|
||||
def create_target(
        url: str,
        width: typing.Optional[int] = None,
        height: typing.Optional[int] = None,
        browser_context_id: typing.Optional[browser.BrowserContextID] = None,
        enable_begin_frame_control: typing.Optional[bool] = None,
        new_window: typing.Optional[bool] = None,
        background: typing.Optional[bool] = None,
        for_tab: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,TargetID]:
    '''
    Creates a new page.

    :param url: The initial URL the page will be navigated to. An empty string indicates about:blank.
    :param width: *(Optional)* Frame width in DIP (headless chrome only).
    :param height: *(Optional)* Frame height in DIP (headless chrome only).
    :param browser_context_id: **(EXPERIMENTAL)** *(Optional)* The browser context to create the page in.
    :param enable_begin_frame_control: **(EXPERIMENTAL)** *(Optional)* Whether BeginFrames for this target will be controlled via DevTools (headless chrome only, not supported on MacOS yet, false by default).
    :param new_window: *(Optional)* Whether to create a new Window or Tab (chrome-only, false by default).
    :param background: *(Optional)* Whether to create the target in background or foreground (chrome-only, false by default).
    :param for_tab: **(EXPERIMENTAL)** *(Optional)* Whether to create the target of type "tab".
    :returns: The id of the page opened.
    '''
    # ``url`` is mandatory; everything else is included only when supplied.
    params: T_JSON_DICT = {'url': url}
    if width is not None:
        params['width'] = width
    if height is not None:
        params['height'] = height
    if browser_context_id is not None:
        params['browserContextId'] = browser_context_id.to_json()
    if enable_begin_frame_control is not None:
        params['enableBeginFrameControl'] = enable_begin_frame_control
    if new_window is not None:
        params['newWindow'] = new_window
    if background is not None:
        params['background'] = background
    if for_tab is not None:
        params['forTab'] = for_tab
    response = yield {
        'method': 'Target.createTarget',
        'params': params,
    }
    return TargetID.from_json(response['targetId'])
|
||||
|
||||
|
||||
def detach_from_target(
        session_id: typing.Optional[SessionID] = None,
        target_id: typing.Optional[TargetID] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Detaches session with given id.

    :param session_id: *(Optional)* Session to detach.
    :param target_id: *(Optional)* Deprecated.
    '''
    params: T_JSON_DICT = {}
    if session_id is not None:
        params['sessionId'] = session_id.to_json()
    if target_id is not None:
        params['targetId'] = target_id.to_json()
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.detachFromTarget',
        'params': params,
    }
|
||||
|
||||
|
||||
def dispose_browser_context(
        browser_context_id: browser.BrowserContextID
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Deletes a BrowserContext. All the belonging pages will be closed without calling their
    beforeunload hooks.

    :param browser_context_id:
    '''
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.disposeBrowserContext',
        'params': {'browserContextId': browser_context_id.to_json()},
    }
|
||||
|
||||
|
||||
def get_target_info(
        target_id: typing.Optional[TargetID] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,TargetInfo]:
    '''
    Returns information about a target.

    **EXPERIMENTAL**

    :param target_id: *(Optional)*
    :returns:
    '''
    params: T_JSON_DICT = {}
    if target_id is not None:
        params['targetId'] = target_id.to_json()
    response = yield {
        'method': 'Target.getTargetInfo',
        'params': params,
    }
    return TargetInfo.from_json(response['targetInfo'])
|
||||
|
||||
|
||||
def get_targets(
        filter_: typing.Optional[TargetFilter] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[TargetInfo]]:
    '''
    Retrieves a list of available targets.

    :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be reported. If filter is not specified and target discovery is currently enabled, a filter used for target discovery is used for consistency.
    :returns: The list of targets.
    '''
    params: T_JSON_DICT = {}
    if filter_ is not None:
        params['filter'] = filter_.to_json()
    response = yield {
        'method': 'Target.getTargets',
        'params': params,
    }
    return [TargetInfo.from_json(info) for info in response['targetInfos']]
|
||||
|
||||
|
||||
def send_message_to_target(
        message: str,
        session_id: typing.Optional[SessionID] = None,
        target_id: typing.Optional[TargetID] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sends protocol message over session with given id.
    Consider using flat mode instead; see commands attachToTarget, setAutoAttach,
    and crbug.com/991325.

    :param message:
    :param session_id: *(Optional)* Identifier of the session.
    :param target_id: *(Optional)* Deprecated.
    '''
    params: T_JSON_DICT = {'message': message}
    if session_id is not None:
        params['sessionId'] = session_id.to_json()
    if target_id is not None:
        params['targetId'] = target_id.to_json()
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.sendMessageToTarget',
        'params': params,
    }
|
||||
|
||||
|
||||
def set_auto_attach(
        auto_attach: bool,
        wait_for_debugger_on_start: bool,
        flatten: typing.Optional[bool] = None,
        filter_: typing.Optional[TargetFilter] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Controls whether to automatically attach to new targets which are considered to be related to
    this one. When turned on, attaches to all existing related targets as well. When turned off,
    automatically detaches from all currently attached targets.
    This also clears all targets added by ``autoAttachRelated`` from the list of targets to watch
    for creation of related targets.

    :param auto_attach: Whether to auto-attach to related targets.
    :param wait_for_debugger_on_start: Whether to pause new targets when attaching to them. Use ```Runtime.runIfWaitingForDebugger``` to run paused targets.
    :param flatten: **(EXPERIMENTAL)** *(Optional)* Enables "flat" access to the session via specifying sessionId attribute in the commands. We plan to make this the default, deprecate non-flattened mode, and eventually retire it. See crbug.com/991325.
    :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be attached.
    '''
    params: T_JSON_DICT = {
        'autoAttach': auto_attach,
        'waitForDebuggerOnStart': wait_for_debugger_on_start,
    }
    if flatten is not None:
        params['flatten'] = flatten
    if filter_ is not None:
        params['filter'] = filter_.to_json()
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.setAutoAttach',
        'params': params,
    }
|
||||
|
||||
|
||||
def auto_attach_related(
        target_id: TargetID,
        wait_for_debugger_on_start: bool,
        filter_: typing.Optional[TargetFilter] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Adds the specified target to the list of targets that will be monitored for any related target
    creation (such as child frames, child workers and new versions of service worker) and reported
    through ``attachedToTarget``. The specified target is also auto-attached.
    This cancels the effect of any previous ``setAutoAttach`` and is also cancelled by subsequent
    ``setAutoAttach``. Only available at the Browser target.

    **EXPERIMENTAL**

    :param target_id:
    :param wait_for_debugger_on_start: Whether to pause new targets when attaching to them. Use ```Runtime.runIfWaitingForDebugger``` to run paused targets.
    :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be attached.
    '''
    params: T_JSON_DICT = {
        'targetId': target_id.to_json(),
        'waitForDebuggerOnStart': wait_for_debugger_on_start,
    }
    if filter_ is not None:
        params['filter'] = filter_.to_json()
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.autoAttachRelated',
        'params': params,
    }
|
||||
|
||||
|
||||
def set_discover_targets(
        discover: bool,
        filter_: typing.Optional[TargetFilter] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Controls whether to discover available targets and notify via
    ``targetCreated/targetInfoChanged/targetDestroyed`` events.

    :param discover: Whether to discover available targets.
    :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be attached. If ```discover```` is false, ````filter``` must be omitted or empty.
    '''
    params: T_JSON_DICT = {'discover': discover}
    if filter_ is not None:
        params['filter'] = filter_.to_json()
    # Command has no result; yield it and discard the response.
    yield {
        'method': 'Target.setDiscoverTargets',
        'params': params,
    }
|
||||
|
||||
|
||||
def set_remote_locations(
        locations: typing.List[RemoteLocation]
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables target discovery for the specified locations, when ``setDiscoverTargets``
    was set to ``true``.

    **EXPERIMENTAL**

    :param locations: List of remote locations.
    '''
    # Each RemoteLocation serializes itself to the wire representation.
    params: T_JSON_DICT = {
        'locations': [location.to_json() for location in locations],
    }
    yield {
        'method': 'Target.setRemoteLocations',
        'params': params,
    }
|
||||
|
||||
|
||||
@event_class('Target.attachedToTarget')
@dataclass
class AttachedToTarget:
    '''
    Issued when attached to target because of auto-attach or ``attachToTarget`` command.

    **EXPERIMENTAL**
    '''
    #: Identifier assigned to the session used to send/receive messages.
    session_id: SessionID
    target_info: TargetInfo
    waiting_for_debugger: bool

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AttachedToTarget:
        # Decode each wire field through its project type's parser.
        session = SessionID.from_json(json['sessionId'])
        info = TargetInfo.from_json(json['targetInfo'])
        return cls(
            session_id=session,
            target_info=info,
            waiting_for_debugger=bool(json['waitingForDebugger']),
        )
|
||||
|
||||
|
||||
@event_class('Target.detachedFromTarget')
@dataclass
class DetachedFromTarget:
    '''
    Issued when detached from target for any reason (including ``detachFromTarget``
    command). Can be issued multiple times per target if multiple sessions have been
    attached to it.

    **EXPERIMENTAL**
    '''
    #: Detached session identifier.
    session_id: SessionID
    #: Deprecated.
    target_id: typing.Optional[TargetID]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DetachedFromTarget:
        # 'targetId' is deprecated and may be absent from the payload.
        target = TargetID.from_json(json['targetId']) if 'targetId' in json else None
        return cls(
            session_id=SessionID.from_json(json['sessionId']),
            target_id=target,
        )
|
||||
|
||||
|
||||
@event_class('Target.receivedMessageFromTarget')
@dataclass
class ReceivedMessageFromTarget:
    '''
    Notifies about a new protocol message received from the session (as reported in
    ``attachedToTarget`` event).
    '''
    #: Identifier of a session which sends a message.
    session_id: SessionID
    message: str
    #: Deprecated.
    target_id: typing.Optional[TargetID]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ReceivedMessageFromTarget:
        # 'targetId' is deprecated and may be absent from the payload.
        target = TargetID.from_json(json['targetId']) if 'targetId' in json else None
        return cls(
            session_id=SessionID.from_json(json['sessionId']),
            message=str(json['message']),
            target_id=target,
        )
|
||||
|
||||
|
||||
@event_class('Target.targetCreated')
@dataclass
class TargetCreated:
    '''
    Issued when a possible inspection target is created.
    '''
    target_info: TargetInfo

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetCreated:
        return cls(target_info=TargetInfo.from_json(json['targetInfo']))
|
||||
|
||||
|
||||
@event_class('Target.targetDestroyed')
@dataclass
class TargetDestroyed:
    '''
    Issued when a target is destroyed.
    '''
    target_id: TargetID

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetDestroyed:
        return cls(target_id=TargetID.from_json(json['targetId']))
|
||||
|
||||
|
||||
@event_class('Target.targetCrashed')
@dataclass
class TargetCrashed:
    '''
    Issued when a target has crashed.
    '''
    target_id: TargetID
    #: Termination status type.
    status: str
    #: Termination error code.
    error_code: int

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetCrashed:
        return cls(
            target_id=TargetID.from_json(json['targetId']),
            status=str(json['status']),
            error_code=int(json['errorCode']),
        )
|
||||
|
||||
|
||||
@event_class('Target.targetInfoChanged')
@dataclass
class TargetInfoChanged:
    '''
    Issued when some information about a target has changed. This only happens
    between ``targetCreated`` and ``targetDestroyed``.
    '''
    target_info: TargetInfo

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TargetInfoChanged:
        return cls(target_info=TargetInfo.from_json(json['targetInfo']))
|
@ -0,0 +1,63 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Tethering (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
def bind(
        port: int
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Request browser port binding.

    :param port: Port number to bind.
    '''
    # The response payload carries no data, so it is discarded.
    yield {
        'method': 'Tethering.bind',
        'params': {'port': port},
    }
|
||||
|
||||
|
||||
def unbind(
        port: int
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Request browser port unbinding.

    :param port: Port number to unbind.
    '''
    # The response payload carries no data, so it is discarded.
    yield {
        'method': 'Tethering.unbind',
        'params': {'port': port},
    }
|
||||
|
||||
|
||||
@event_class('Tethering.accepted')
@dataclass
class Accepted:
    '''
    Informs that port was successfully bound and got a specified connection id.
    '''
    #: Port number that was successfully bound.
    port: int
    #: Connection id to be used.
    connection_id: str

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> Accepted:
        return cls(
            port=int(json['port']),
            connection_id=str(json['connectionId']),
        )
|
360
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/tracing.py
Executable file
360
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/tracing.py
Executable file
@ -0,0 +1,360 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: Tracing
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
from . import io
|
||||
|
||||
|
||||
class MemoryDumpConfig(dict):
    '''
    Configuration for memory dump. Used only when "memory-infra" category is enabled.
    '''
    def to_json(self) -> dict:
        # Already a plain dict on the wire; no conversion needed.
        return self

    @classmethod
    def from_json(cls, json: dict) -> MemoryDumpConfig:
        return cls(json)

    def __repr__(self):
        return f'MemoryDumpConfig({super().__repr__()})'
|
||||
|
||||
|
||||
@dataclass
class TraceConfig:
    '''
    Tracing session configuration passed to ``Tracing.start``.

    All fields are optional; unset fields are omitted from the serialized
    payload rather than sent as null.
    '''
    #: Controls how the trace buffer stores data.
    record_mode: typing.Optional[str] = None

    #: Size of the trace buffer in kilobytes. If not specified or zero is passed, a default value
    #: of 200 MB would be used.
    trace_buffer_size_in_kb: typing.Optional[float] = None

    #: Turns on JavaScript stack sampling.
    enable_sampling: typing.Optional[bool] = None

    #: Turns on system tracing.
    enable_systrace: typing.Optional[bool] = None

    #: Turns on argument filter.
    enable_argument_filter: typing.Optional[bool] = None

    #: Included category filters.
    included_categories: typing.Optional[typing.List[str]] = None

    #: Excluded category filters.
    excluded_categories: typing.Optional[typing.List[str]] = None

    #: Configuration to synthesize the delays in tracing.
    synthetic_delays: typing.Optional[typing.List[str]] = None

    #: Configuration for memory dump triggers. Used only when "memory-infra" category is enabled.
    memory_dump_config: typing.Optional[MemoryDumpConfig] = None

    def to_json(self) -> T_JSON_DICT:
        '''Serialize to the camelCase wire format, omitting unset fields.'''
        json = dict()
        if self.record_mode is not None:
            json['recordMode'] = self.record_mode
        if self.trace_buffer_size_in_kb is not None:
            json['traceBufferSizeInKb'] = self.trace_buffer_size_in_kb
        if self.enable_sampling is not None:
            json['enableSampling'] = self.enable_sampling
        if self.enable_systrace is not None:
            json['enableSystrace'] = self.enable_systrace
        if self.enable_argument_filter is not None:
            json['enableArgumentFilter'] = self.enable_argument_filter
        # List fields are shallow-copied so the payload does not alias
        # the caller's lists.
        if self.included_categories is not None:
            json['includedCategories'] = [i for i in self.included_categories]
        if self.excluded_categories is not None:
            json['excludedCategories'] = [i for i in self.excluded_categories]
        if self.synthetic_delays is not None:
            json['syntheticDelays'] = [i for i in self.synthetic_delays]
        if self.memory_dump_config is not None:
            json['memoryDumpConfig'] = self.memory_dump_config.to_json()
        return json

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TraceConfig:
        '''Parse a wire-format dict; absent keys become None.'''
        return cls(
            record_mode=str(json['recordMode']) if 'recordMode' in json else None,
            trace_buffer_size_in_kb=float(json['traceBufferSizeInKb']) if 'traceBufferSizeInKb' in json else None,
            enable_sampling=bool(json['enableSampling']) if 'enableSampling' in json else None,
            enable_systrace=bool(json['enableSystrace']) if 'enableSystrace' in json else None,
            enable_argument_filter=bool(json['enableArgumentFilter']) if 'enableArgumentFilter' in json else None,
            included_categories=[str(i) for i in json['includedCategories']] if 'includedCategories' in json else None,
            excluded_categories=[str(i) for i in json['excludedCategories']] if 'excludedCategories' in json else None,
            synthetic_delays=[str(i) for i in json['syntheticDelays']] if 'syntheticDelays' in json else None,
            memory_dump_config=MemoryDumpConfig.from_json(json['memoryDumpConfig']) if 'memoryDumpConfig' in json else None,
        )
|
||||
|
||||
|
||||
class StreamFormat(enum.Enum):
    '''
    Data format of a trace. Can be either the legacy JSON format or the
    protocol buffer format. Note that the JSON format will be deprecated soon.
    '''
    JSON = "json"
    PROTO = "proto"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> StreamFormat:
        return cls(json)
|
||||
|
||||
|
||||
class StreamCompression(enum.Enum):
    '''
    Compression type to use for traces returned via streams.
    '''
    NONE = "none"
    GZIP = "gzip"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> StreamCompression:
        return cls(json)
|
||||
|
||||
|
||||
class MemoryDumpLevelOfDetail(enum.Enum):
    '''
    Details exposed when memory request explicitly declared.
    Keep consistent with memory_dump_request_args.h and
    memory_instrumentation.mojom
    '''
    BACKGROUND = "background"
    LIGHT = "light"
    DETAILED = "detailed"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> MemoryDumpLevelOfDetail:
        return cls(json)
|
||||
|
||||
|
||||
class TracingBackend(enum.Enum):
    '''
    Backend type to use for tracing. ``chrome`` uses the Chrome-integrated
    tracing service and is supported on all platforms. ``system`` is only
    supported on Chrome OS and uses the Perfetto system tracing service.
    ``auto`` chooses ``system`` when the perfettoConfig provided to Tracing.start
    specifies at least one non-Chrome data source; otherwise uses ``chrome``.
    '''
    AUTO = "auto"
    CHROME = "chrome"
    SYSTEM = "system"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> TracingBackend:
        return cls(json)
|
||||
|
||||
|
||||
def end() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Stop trace events collection.
    '''
    # No parameters and no meaningful response payload.
    yield {'method': 'Tracing.end'}
|
||||
|
||||
|
||||
def get_categories() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
    '''
    Gets supported tracing categories.

    **EXPERIMENTAL**

    :returns: A list of supported tracing categories.
    '''
    response = yield {'method': 'Tracing.getCategories'}
    return [str(category) for category in response['categories']]
|
||||
|
||||
|
||||
def record_clock_sync_marker(
        sync_id: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Record a clock sync marker in the trace.

    **EXPERIMENTAL**

    :param sync_id: The ID of this clock sync marker
    '''
    yield {
        'method': 'Tracing.recordClockSyncMarker',
        'params': {'syncId': sync_id},
    }
|
||||
|
||||
|
||||
def request_memory_dump(
        deterministic: typing.Optional[bool] = None,
        level_of_detail: typing.Optional[MemoryDumpLevelOfDetail] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[str, bool]]:
    '''
    Request a global memory dump.

    **EXPERIMENTAL**

    :param deterministic: *(Optional)* Enables more deterministic results by forcing garbage collection
    :param level_of_detail: *(Optional)* Specifies level of details in memory dump. Defaults to "detailed".
    :returns: A tuple with the following items:

        0. **dumpGuid** - GUID of the resulting global memory dump.
        1. **success** - True iff the global memory dump succeeded.
    '''
    # Optional parameters are omitted from the payload when unset.
    params: T_JSON_DICT = dict()
    if deterministic is not None:
        params['deterministic'] = deterministic
    if level_of_detail is not None:
        params['levelOfDetail'] = level_of_detail.to_json()
    response = yield {
        'method': 'Tracing.requestMemoryDump',
        'params': params,
    }
    return str(response['dumpGuid']), bool(response['success'])
|
||||
|
||||
|
||||
def start(
        categories: typing.Optional[str] = None,
        options: typing.Optional[str] = None,
        buffer_usage_reporting_interval: typing.Optional[float] = None,
        transfer_mode: typing.Optional[str] = None,
        stream_format: typing.Optional[StreamFormat] = None,
        stream_compression: typing.Optional[StreamCompression] = None,
        trace_config: typing.Optional[TraceConfig] = None,
        perfetto_config: typing.Optional[str] = None,
        tracing_backend: typing.Optional[TracingBackend] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Start trace events collection.

    :param categories: **(EXPERIMENTAL)** *(Optional)* Category/tag filter
    :param options: **(EXPERIMENTAL)** *(Optional)* Tracing options
    :param buffer_usage_reporting_interval: **(EXPERIMENTAL)** *(Optional)* If set, the agent will issue bufferUsage events at this interval, specified in milliseconds
    :param transfer_mode: *(Optional)* Whether to report trace events as series of dataCollected events or to save trace to a stream (defaults to ``ReportEvents``).
    :param stream_format: *(Optional)* Trace data format to use. This only applies when using ``ReturnAsStream`` transfer mode (defaults to ``json``).
    :param stream_compression: **(EXPERIMENTAL)** *(Optional)* Compression format to use. This only applies when using ``ReturnAsStream`` transfer mode (defaults to ``none``)
    :param trace_config: *(Optional)*
    :param perfetto_config: **(EXPERIMENTAL)** *(Optional)* Base64-encoded serialized perfetto.protos.TraceConfig protobuf message When specified, the parameters ``categories``, ``options``, ``traceConfig`` are ignored.
    :param tracing_backend: **(EXPERIMENTAL)** *(Optional)* Backend type (defaults to ``auto``)
    '''
    # Every parameter is optional; only the ones the caller set are serialized.
    params: T_JSON_DICT = dict()
    if categories is not None:
        params['categories'] = categories
    if options is not None:
        params['options'] = options
    if buffer_usage_reporting_interval is not None:
        params['bufferUsageReportingInterval'] = buffer_usage_reporting_interval
    if transfer_mode is not None:
        params['transferMode'] = transfer_mode
    if stream_format is not None:
        params['streamFormat'] = stream_format.to_json()
    if stream_compression is not None:
        params['streamCompression'] = stream_compression.to_json()
    if trace_config is not None:
        params['traceConfig'] = trace_config.to_json()
    if perfetto_config is not None:
        params['perfettoConfig'] = perfetto_config
    if tracing_backend is not None:
        params['tracingBackend'] = tracing_backend.to_json()
    cmd_dict: T_JSON_DICT = {
        'method': 'Tracing.start',
        'params': params,
    }
    json = yield cmd_dict
|
||||
|
||||
|
||||
@event_class('Tracing.bufferUsage')
@dataclass
class BufferUsage:
    '''
    **EXPERIMENTAL**
    '''
    #: A number in range [0..1] that indicates the used size of event buffer as a fraction of its
    #: total size.
    percent_full: typing.Optional[float]
    #: An approximate number of events in the trace log.
    event_count: typing.Optional[float]
    #: A number in range [0..1] that indicates the used size of event buffer as a fraction of its
    #: total size.
    value: typing.Optional[float]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> BufferUsage:
        # All three fields are optional on the wire; absent keys become None.
        def opt_float(key):
            return float(json[key]) if key in json else None
        return cls(
            percent_full=opt_float('percentFull'),
            event_count=opt_float('eventCount'),
            value=opt_float('value'),
        )
|
||||
|
||||
|
||||
@event_class('Tracing.dataCollected')
@dataclass
class DataCollected:
    '''
    Contains a bucket of collected trace events. When tracing is stopped collected
    events will be sent as a sequence of dataCollected events followed by
    tracingComplete event.

    **EXPERIMENTAL**
    '''
    value: typing.List[dict]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> DataCollected:
        # Copy each raw event dict so the event does not alias the payload.
        return cls(value=[dict(entry) for entry in json['value']])
|
||||
|
||||
|
||||
@event_class('Tracing.tracingComplete')
@dataclass
class TracingComplete:
    '''
    Signals that tracing is stopped and there is no trace buffers pending flush,
    all data were delivered via dataCollected events.
    '''
    #: Indicates whether some trace data is known to have been lost, e.g. because the trace ring
    #: buffer wrapped around.
    data_loss_occurred: bool
    #: A handle of the stream that holds resulting trace data.
    stream: typing.Optional[io.StreamHandle]
    #: Trace data format of returned stream.
    trace_format: typing.Optional[StreamFormat]
    #: Compression format of returned stream.
    stream_compression: typing.Optional[StreamCompression]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> TracingComplete:
        # Optional fields are only parsed when present in the payload.
        def opt(key, parse):
            return parse(json[key]) if key in json else None
        return cls(
            data_loss_occurred=bool(json['dataLossOccurred']),
            stream=opt('stream', io.StreamHandle.from_json),
            trace_format=opt('traceFormat', StreamFormat.from_json),
            stream_compression=opt('streamCompression', StreamCompression.from_json),
        )
|
20
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/util.py
Executable file
20
lib/python3.13/site-packages/selenium/webdriver/common/devtools/v130/util.py
Executable file
@ -0,0 +1,20 @@
|
||||
|
||||
import typing
|
||||
|
||||
|
||||
T_JSON_DICT = typing.Dict[str, typing.Any]
|
||||
_event_parsers = dict()
|
||||
|
||||
|
||||
def event_class(method):
|
||||
''' A decorator that registers a class as an event class. '''
|
||||
def decorate(cls):
|
||||
_event_parsers[method] = cls
|
||||
cls.event_class = method
|
||||
return cls
|
||||
return decorate
|
||||
|
||||
|
||||
def parse_json_event(json: T_JSON_DICT) -> typing.Any:
|
||||
''' Parse a JSON dictionary into a CDP event. '''
|
||||
return _event_parsers[json['method']].from_json(json['params'])
|
@ -0,0 +1,603 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: WebAudio (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class GraphObjectId(str):
    '''
    An unique ID for a graph object (AudioContext, AudioNode, AudioParam) in Web Audio API
    '''
    def to_json(self) -> str:
        # Already a string on the wire.
        return self

    @classmethod
    def from_json(cls, json: str) -> GraphObjectId:
        return cls(json)

    def __repr__(self):
        return f'GraphObjectId({super().__repr__()})'
|
||||
|
||||
|
||||
class ContextType(enum.Enum):
    '''
    Enum of BaseAudioContext types
    '''
    REALTIME = "realtime"
    OFFLINE = "offline"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ContextType:
        return cls(json)
|
||||
|
||||
|
||||
class ContextState(enum.Enum):
    '''
    Enum of AudioContextState from the spec
    '''
    SUSPENDED = "suspended"
    RUNNING = "running"
    CLOSED = "closed"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ContextState:
        return cls(json)
|
||||
|
||||
|
||||
class NodeType(str):
    '''
    Enum of AudioNode types
    '''
    def to_json(self) -> str:
        # Already a string on the wire.
        return self

    @classmethod
    def from_json(cls, json: str) -> NodeType:
        return cls(json)

    def __repr__(self):
        return f'NodeType({super().__repr__()})'
|
||||
|
||||
|
||||
class ChannelCountMode(enum.Enum):
    '''
    Enum of AudioNode::ChannelCountMode from the spec
    '''
    CLAMPED_MAX = "clamped-max"
    EXPLICIT = "explicit"
    MAX_ = "max"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ChannelCountMode:
        return cls(json)
|
||||
|
||||
|
||||
class ChannelInterpretation(enum.Enum):
    '''
    Enum of AudioNode::ChannelInterpretation from the spec
    '''
    DISCRETE = "discrete"
    SPEAKERS = "speakers"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ChannelInterpretation:
        return cls(json)
|
||||
|
||||
|
||||
class ParamType(str):
    '''
    Enum of AudioParam types
    '''
    def to_json(self) -> str:
        # Already a string on the wire.
        return self

    @classmethod
    def from_json(cls, json: str) -> ParamType:
        return cls(json)

    def __repr__(self):
        return f'ParamType({super().__repr__()})'
|
||||
|
||||
|
||||
class AutomationRate(enum.Enum):
    '''
    Enum of AudioParam::AutomationRate from the spec
    '''
    A_RATE = "a-rate"
    K_RATE = "k-rate"

    def to_json(self) -> str:
        # The wire representation is the enum's string value.
        return self.value

    @classmethod
    def from_json(cls, json: str) -> AutomationRate:
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class ContextRealtimeData:
    '''
    Fields in AudioContext that change in real-time.
    '''
    #: The current context time in second in BaseAudioContext.
    current_time: float

    #: The time spent on rendering graph divided by render quantum duration,
    #: and multiplied by 100. 100 means the audio renderer reached the full
    #: capacity and glitch may occur.
    render_capacity: float

    #: A running mean of callback interval.
    callback_interval_mean: float

    #: A running variance of callback interval.
    callback_interval_variance: float

    def to_json(self):
        # All fields are required, so the payload is a plain literal.
        return {
            'currentTime': self.current_time,
            'renderCapacity': self.render_capacity,
            'callbackIntervalMean': self.callback_interval_mean,
            'callbackIntervalVariance': self.callback_interval_variance,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            current_time=float(json['currentTime']),
            render_capacity=float(json['renderCapacity']),
            callback_interval_mean=float(json['callbackIntervalMean']),
            callback_interval_variance=float(json['callbackIntervalVariance']),
        )
|
||||
|
||||
|
||||
@dataclass
class BaseAudioContext:
    '''
    Protocol object for BaseAudioContext
    '''
    context_id: GraphObjectId

    context_type: ContextType

    context_state: ContextState

    #: Platform-dependent callback buffer size.
    callback_buffer_size: float

    #: Number of output channels supported by audio hardware in use.
    max_output_channel_count: float

    #: Context sample rate.
    sample_rate: float

    #: Optional realtime metrics; presumably only sent for realtime contexts —
    #: TODO confirm against the CDP WebAudio spec.
    realtime_data: typing.Optional[ContextRealtimeData] = None

    def to_json(self):
        '''Serialize to the camelCase wire format; realtimeData is omitted when unset.'''
        json = dict()
        json['contextId'] = self.context_id.to_json()
        json['contextType'] = self.context_type.to_json()
        json['contextState'] = self.context_state.to_json()
        json['callbackBufferSize'] = self.callback_buffer_size
        json['maxOutputChannelCount'] = self.max_output_channel_count
        json['sampleRate'] = self.sample_rate
        if self.realtime_data is not None:
            json['realtimeData'] = self.realtime_data.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Parse a wire-format dict; an absent realtimeData key becomes None.'''
        return cls(
            context_id=GraphObjectId.from_json(json['contextId']),
            context_type=ContextType.from_json(json['contextType']),
            context_state=ContextState.from_json(json['contextState']),
            callback_buffer_size=float(json['callbackBufferSize']),
            max_output_channel_count=float(json['maxOutputChannelCount']),
            sample_rate=float(json['sampleRate']),
            realtime_data=ContextRealtimeData.from_json(json['realtimeData']) if 'realtimeData' in json else None,
        )
|
||||
|
||||
|
||||
@dataclass
class AudioListener:
    '''
    Protocol object for AudioListener
    '''
    listener_id: GraphObjectId

    context_id: GraphObjectId

    def to_json(self):
        # Both identifiers are required; serialize as a plain literal.
        return {
            'listenerId': self.listener_id.to_json(),
            'contextId': self.context_id.to_json(),
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            listener_id=GraphObjectId.from_json(json['listenerId']),
            context_id=GraphObjectId.from_json(json['contextId']),
        )
|
||||
|
||||
|
||||
@dataclass
class AudioNode:
    '''
    Protocol object for AudioNode
    '''
    node_id: GraphObjectId

    context_id: GraphObjectId

    node_type: NodeType

    number_of_inputs: float

    number_of_outputs: float

    channel_count: float

    channel_count_mode: ChannelCountMode

    channel_interpretation: ChannelInterpretation

    def to_json(self):
        '''Serialize all fields to the camelCase wire format (all are required).'''
        json = dict()
        json['nodeId'] = self.node_id.to_json()
        json['contextId'] = self.context_id.to_json()
        json['nodeType'] = self.node_type.to_json()
        json['numberOfInputs'] = self.number_of_inputs
        json['numberOfOutputs'] = self.number_of_outputs
        json['channelCount'] = self.channel_count
        json['channelCountMode'] = self.channel_count_mode.to_json()
        json['channelInterpretation'] = self.channel_interpretation.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        '''Parse a wire-format dict; counts arrive as JSON numbers and are coerced to float.'''
        return cls(
            node_id=GraphObjectId.from_json(json['nodeId']),
            context_id=GraphObjectId.from_json(json['contextId']),
            node_type=NodeType.from_json(json['nodeType']),
            number_of_inputs=float(json['numberOfInputs']),
            number_of_outputs=float(json['numberOfOutputs']),
            channel_count=float(json['channelCount']),
            channel_count_mode=ChannelCountMode.from_json(json['channelCountMode']),
            channel_interpretation=ChannelInterpretation.from_json(json['channelInterpretation']),
        )
|
||||
|
||||
|
||||
@dataclass
class AudioParam:
    '''
    Protocol object for AudioParam
    '''
    param_id: GraphObjectId

    node_id: GraphObjectId

    context_id: GraphObjectId

    param_type: ParamType

    rate: AutomationRate

    default_value: float

    min_value: float

    max_value: float

    def to_json(self):
        '''Serialize all fields to the camelCase wire format (all are required).'''
        json = dict()
        json['paramId'] = self.param_id.to_json()
        json['nodeId'] = self.node_id.to_json()
        json['contextId'] = self.context_id.to_json()
        json['paramType'] = self.param_type.to_json()
        json['rate'] = self.rate.to_json()
        json['defaultValue'] = self.default_value
        json['minValue'] = self.min_value
        json['maxValue'] = self.max_value
        return json

    @classmethod
    def from_json(cls, json):
        '''Parse a wire-format dict; numeric bounds are coerced to float.'''
        return cls(
            param_id=GraphObjectId.from_json(json['paramId']),
            node_id=GraphObjectId.from_json(json['nodeId']),
            context_id=GraphObjectId.from_json(json['contextId']),
            param_type=ParamType.from_json(json['paramType']),
            rate=AutomationRate.from_json(json['rate']),
            default_value=float(json['defaultValue']),
            min_value=float(json['minValue']),
            max_value=float(json['maxValue']),
        )
|
||||
|
||||
|
||||
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enables the WebAudio domain and starts sending context lifetime events.
    '''
    # No parameters and no meaningful response payload.
    yield {'method': 'WebAudio.enable'}
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables the WebAudio domain.
    '''
    # No parameters and no meaningful response payload.
    yield {'method': 'WebAudio.disable'}
|
||||
|
||||
|
||||
def get_realtime_data(
        context_id: GraphObjectId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,ContextRealtimeData]:
    '''
    Fetch the realtime data from the registered contexts.

    :param context_id: Identifier of the context to query.
    :returns: The realtime data for that context.
    '''
    response = yield {
        'method': 'WebAudio.getRealtimeData',
        'params': {'contextId': context_id.to_json()},
    }
    return ContextRealtimeData.from_json(response['realtimeData'])
|
||||
|
||||
|
||||
@event_class('WebAudio.contextCreated')
@dataclass
class ContextCreated:
    '''Fired when a new BaseAudioContext has been created.'''
    #: The newly created context.
    context: BaseAudioContext

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ContextCreated:
        '''Decode the event payload from its CDP wire form.'''
        context = BaseAudioContext.from_json(json['context'])
        return cls(context=context)
|
||||
|
||||
|
||||
@event_class('WebAudio.contextWillBeDestroyed')
@dataclass
class ContextWillBeDestroyed:
    '''Fired when an existing BaseAudioContext is about to be destroyed.'''
    #: Identifier of the doomed context.
    context_id: GraphObjectId

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ContextWillBeDestroyed:
        '''Decode the event payload from its CDP wire form.'''
        context_id = GraphObjectId.from_json(json['contextId'])
        return cls(context_id=context_id)
|
||||
|
||||
|
||||
@event_class('WebAudio.contextChanged')
@dataclass
class ContextChanged:
    '''
    Fired when an existing BaseAudioContext has changed some properties
    (its id stays the same).
    '''
    #: The updated context.
    context: BaseAudioContext

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ContextChanged:
        '''Decode the event payload from its CDP wire form.'''
        context = BaseAudioContext.from_json(json['context'])
        return cls(context=context)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioListenerCreated')
@dataclass
class AudioListenerCreated:
    '''Fired when the construction of an AudioListener has finished.'''
    #: The newly constructed listener.
    listener: AudioListener

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AudioListenerCreated:
        '''Decode the event payload from its CDP wire form.'''
        listener = AudioListener.from_json(json['listener'])
        return cls(listener=listener)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioListenerWillBeDestroyed')
@dataclass
class AudioListenerWillBeDestroyed:
    '''
    Notifies that an existing AudioListener will be destroyed.

    NOTE(review): the generated description read "a new AudioListener has
    been created", which contradicts the ``WillBeDestroyed`` event name and
    the parallel ``contextWillBeDestroyed``/``audioNodeWillBeDestroyed``
    events; corrected here.
    '''
    #: Context the listener belongs to.
    context_id: GraphObjectId
    #: Identifier of the listener being destroyed.
    listener_id: GraphObjectId

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AudioListenerWillBeDestroyed:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            context_id=GraphObjectId.from_json(json['contextId']),
            listener_id=GraphObjectId.from_json(json['listenerId'])
        )
|
||||
|
||||
|
||||
@event_class('WebAudio.audioNodeCreated')
@dataclass
class AudioNodeCreated:
    '''Fired when a new AudioNode has been created.'''
    #: The newly created node.
    node: AudioNode

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AudioNodeCreated:
        '''Decode the event payload from its CDP wire form.'''
        node = AudioNode.from_json(json['node'])
        return cls(node=node)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioNodeWillBeDestroyed')
@dataclass
class AudioNodeWillBeDestroyed:
    '''Fired when an existing AudioNode has been destroyed.'''
    #: Context the node belongs to.
    context_id: GraphObjectId
    #: Identifier of the destroyed node.
    node_id: GraphObjectId

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AudioNodeWillBeDestroyed:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            GraphObjectId.from_json(json['contextId']),
            GraphObjectId.from_json(json['nodeId']),
        )
|
||||
|
||||
|
||||
@event_class('WebAudio.audioParamCreated')
@dataclass
class AudioParamCreated:
    '''Fired when a new AudioParam has been created.'''
    #: The newly created param.
    param: AudioParam

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AudioParamCreated:
        '''Decode the event payload from its CDP wire form.'''
        param = AudioParam.from_json(json['param'])
        return cls(param=param)
|
||||
|
||||
|
||||
@event_class('WebAudio.audioParamWillBeDestroyed')
@dataclass
class AudioParamWillBeDestroyed:
    '''Fired when an existing AudioParam has been destroyed.'''
    #: Context the param belongs to.
    context_id: GraphObjectId
    #: Node the param belongs to.
    node_id: GraphObjectId
    #: Identifier of the destroyed param.
    param_id: GraphObjectId

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> AudioParamWillBeDestroyed:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            GraphObjectId.from_json(json['contextId']),
            GraphObjectId.from_json(json['nodeId']),
            GraphObjectId.from_json(json['paramId']),
        )
|
||||
|
||||
|
||||
@event_class('WebAudio.nodesConnected')
@dataclass
class NodesConnected:
    '''Fired when two AudioNodes are connected.'''
    context_id: GraphObjectId
    source_id: GraphObjectId
    destination_id: GraphObjectId
    #: Output index on the source node; absent on the wire means None.
    source_output_index: typing.Optional[float]
    #: Input index on the destination node; absent on the wire means None.
    destination_input_index: typing.Optional[float]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> NodesConnected:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            GraphObjectId.from_json(json['contextId']),
            GraphObjectId.from_json(json['sourceId']),
            GraphObjectId.from_json(json['destinationId']),
            float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None,
            float(json['destinationInputIndex']) if 'destinationInputIndex' in json else None,
        )
|
||||
|
||||
|
||||
@event_class('WebAudio.nodesDisconnected')
@dataclass
class NodesDisconnected:
    '''
    Fired when AudioNodes are disconnected. The destination can be null,
    meaning all outgoing connections from the source are disconnected.
    '''
    context_id: GraphObjectId
    source_id: GraphObjectId
    destination_id: GraphObjectId
    #: Output index on the source node; absent on the wire means None.
    source_output_index: typing.Optional[float]
    #: Input index on the destination node; absent on the wire means None.
    destination_input_index: typing.Optional[float]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> NodesDisconnected:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            GraphObjectId.from_json(json['contextId']),
            GraphObjectId.from_json(json['sourceId']),
            GraphObjectId.from_json(json['destinationId']),
            float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None,
            float(json['destinationInputIndex']) if 'destinationInputIndex' in json else None,
        )
|
||||
|
||||
|
||||
@event_class('WebAudio.nodeParamConnected')
@dataclass
class NodeParamConnected:
    '''Fired when an AudioNode is connected to an AudioParam.'''
    context_id: GraphObjectId
    source_id: GraphObjectId
    destination_id: GraphObjectId
    #: Output index on the source node; absent on the wire means None.
    source_output_index: typing.Optional[float]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> NodeParamConnected:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            GraphObjectId.from_json(json['contextId']),
            GraphObjectId.from_json(json['sourceId']),
            GraphObjectId.from_json(json['destinationId']),
            float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None,
        )
|
||||
|
||||
|
||||
@event_class('WebAudio.nodeParamDisconnected')
@dataclass
class NodeParamDisconnected:
    '''Fired when an AudioNode is disconnected from an AudioParam.'''
    context_id: GraphObjectId
    source_id: GraphObjectId
    destination_id: GraphObjectId
    #: Output index on the source node; absent on the wire means None.
    source_output_index: typing.Optional[float]

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> NodeParamDisconnected:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            GraphObjectId.from_json(json['contextId']),
            GraphObjectId.from_json(json['sourceId']),
            GraphObjectId.from_json(json['destinationId']),
            float(json['sourceOutputIndex']) if 'sourceOutputIndex' in json else None,
        )
|
@ -0,0 +1,528 @@
|
||||
# DO NOT EDIT THIS FILE!
|
||||
#
|
||||
# This file is generated from the CDP specification. If you need to make
|
||||
# changes, edit the generator and regenerate all of the modules.
|
||||
#
|
||||
# CDP domain: WebAuthn (experimental)
|
||||
from __future__ import annotations
|
||||
from .util import event_class, T_JSON_DICT
|
||||
from dataclasses import dataclass
|
||||
import enum
|
||||
import typing
|
||||
|
||||
class AuthenticatorId(str):
    '''Opaque identifier of a virtual authenticator.'''

    def to_json(self) -> str:
        '''An AuthenticatorId serializes as its own string value.'''
        return self

    @classmethod
    def from_json(cls, json: str) -> AuthenticatorId:
        '''Wrap a wire-format string in an AuthenticatorId.'''
        return cls(json)

    def __repr__(self):
        return 'AuthenticatorId({})'.format(super().__repr__())
|
||||
|
||||
|
||||
class AuthenticatorProtocol(enum.Enum):
    '''Protocol spoken by a virtual authenticator.'''
    U2F = "u2f"
    CTAP2 = "ctap2"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> AuthenticatorProtocol:
        '''Look up the member matching a CDP wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
class Ctap2Version(enum.Enum):
    '''CTAP2 minor version supported by a virtual authenticator.'''
    CTAP2_0 = "ctap2_0"
    CTAP2_1 = "ctap2_1"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> Ctap2Version:
        '''Look up the member matching a CDP wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
class AuthenticatorTransport(enum.Enum):
    '''Transport over which a virtual authenticator is reachable.'''
    USB = "usb"
    NFC = "nfc"
    BLE = "ble"
    CABLE = "cable"
    INTERNAL = "internal"

    def to_json(self) -> str:
        '''Serialize to the CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> AuthenticatorTransport:
        '''Look up the member matching a CDP wire value.'''
        return cls(json)
|
||||
|
||||
|
||||
@dataclass
class VirtualAuthenticatorOptions:
    '''Configuration for a virtual authenticator.'''
    protocol: AuthenticatorProtocol

    transport: AuthenticatorTransport

    #: Defaults to ctap2_0. Ignored if ``protocol`` == u2f.
    ctap2_version: typing.Optional[Ctap2Version] = None

    #: Defaults to false.
    has_resident_key: typing.Optional[bool] = None

    #: Defaults to false.
    has_user_verification: typing.Optional[bool] = None

    #: If set to true, the authenticator will support the largeBlob extension.
    #: https://w3c.github.io/webauthn#largeBlob
    #: Defaults to false.
    has_large_blob: typing.Optional[bool] = None

    #: If set to true, the authenticator will support the credBlob extension.
    #: https://fidoalliance.org/specs/fido-v2.1-rd-20201208/fido-client-to-authenticator-protocol-v2.1-rd-20201208.html#sctn-credBlob-extension
    #: Defaults to false.
    has_cred_blob: typing.Optional[bool] = None

    #: If set to true, the authenticator will support the minPinLength extension.
    #: https://fidoalliance.org/specs/fido-v2.1-ps-20210615/fido-client-to-authenticator-protocol-v2.1-ps-20210615.html#sctn-minpinlength-extension
    #: Defaults to false.
    has_min_pin_length: typing.Optional[bool] = None

    #: If set to true, the authenticator will support the prf extension.
    #: https://w3c.github.io/webauthn/#prf-extension
    #: Defaults to false.
    has_prf: typing.Optional[bool] = None

    #: If set to true, tests of user presence will succeed immediately.
    #: Otherwise, they will not be resolved. Defaults to true.
    automatic_presence_simulation: typing.Optional[bool] = None

    #: Sets whether User Verification succeeds or fails for an authenticator.
    #: Defaults to false.
    is_user_verified: typing.Optional[bool] = None

    #: Credentials created by this authenticator will have the backup
    #: eligibility (BE) flag set to this value. Defaults to false.
    #: https://w3c.github.io/webauthn/#sctn-credential-backup
    default_backup_eligibility: typing.Optional[bool] = None

    #: Credentials created by this authenticator will have the backup state
    #: (BS) flag set to this value. Defaults to false.
    #: https://w3c.github.io/webauthn/#sctn-credential-backup
    default_backup_state: typing.Optional[bool] = None

    def to_json(self):
        '''Serialize to the CDP wire format, omitting unset optional fields.'''
        json = {
            'protocol': self.protocol.to_json(),
            'transport': self.transport.to_json(),
        }
        if self.ctap2_version is not None:
            json['ctap2Version'] = self.ctap2_version.to_json()
        # Optional boolean flags are emitted only when explicitly set.
        optional_flags = (
            ('hasResidentKey', self.has_resident_key),
            ('hasUserVerification', self.has_user_verification),
            ('hasLargeBlob', self.has_large_blob),
            ('hasCredBlob', self.has_cred_blob),
            ('hasMinPinLength', self.has_min_pin_length),
            ('hasPrf', self.has_prf),
            ('automaticPresenceSimulation', self.automatic_presence_simulation),
            ('isUserVerified', self.is_user_verified),
            ('defaultBackupEligibility', self.default_backup_eligibility),
            ('defaultBackupState', self.default_backup_state),
        )
        for key, value in optional_flags:
            if value is not None:
                json[key] = value
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from the CDP wire format; missing optionals become None.'''
        def opt_bool(key):
            return bool(json[key]) if key in json else None
        return cls(
            protocol=AuthenticatorProtocol.from_json(json['protocol']),
            transport=AuthenticatorTransport.from_json(json['transport']),
            ctap2_version=Ctap2Version.from_json(json['ctap2Version']) if 'ctap2Version' in json else None,
            has_resident_key=opt_bool('hasResidentKey'),
            has_user_verification=opt_bool('hasUserVerification'),
            has_large_blob=opt_bool('hasLargeBlob'),
            has_cred_blob=opt_bool('hasCredBlob'),
            has_min_pin_length=opt_bool('hasMinPinLength'),
            has_prf=opt_bool('hasPrf'),
            automatic_presence_simulation=opt_bool('automaticPresenceSimulation'),
            is_user_verified=opt_bool('isUserVerified'),
            default_backup_eligibility=opt_bool('defaultBackupEligibility'),
            default_backup_state=opt_bool('defaultBackupState'),
        )
|
||||
|
||||
|
||||
@dataclass
class Credential:
    '''A WebAuthn credential stored on a virtual authenticator.'''
    credential_id: str

    is_resident_credential: bool

    #: The ECDSA P-256 private key in PKCS#8 format.
    private_key: str

    #: Signature counter. This is incremented by one for each successful
    #: assertion.
    #: See https://w3c.github.io/webauthn/#signature-counter
    sign_count: int

    #: Relying Party ID the credential is scoped to. Must be set when adding a
    #: credential.
    rp_id: typing.Optional[str] = None

    #: An opaque byte sequence with a maximum size of 64 bytes mapping the
    #: credential to a specific user.
    user_handle: typing.Optional[str] = None

    #: The large blob associated with the credential.
    #: See https://w3c.github.io/webauthn/#sctn-large-blob-extension
    large_blob: typing.Optional[str] = None

    #: Assertions returned by this credential will have the backup eligibility
    #: (BE) flag set to this value. Defaults to the authenticator's
    #: defaultBackupEligibility value.
    backup_eligibility: typing.Optional[bool] = None

    #: Assertions returned by this credential will have the backup state (BS)
    #: flag set to this value. Defaults to the authenticator's
    #: defaultBackupState value.
    backup_state: typing.Optional[bool] = None

    def to_json(self):
        '''Serialize to the CDP wire format, omitting unset optional fields.'''
        json = {
            'credentialId': self.credential_id,
            'isResidentCredential': self.is_resident_credential,
            'privateKey': self.private_key,
            'signCount': self.sign_count,
        }
        optional_fields = (
            ('rpId', self.rp_id),
            ('userHandle', self.user_handle),
            ('largeBlob', self.large_blob),
            ('backupEligibility', self.backup_eligibility),
            ('backupState', self.backup_state),
        )
        for key, value in optional_fields:
            if value is not None:
                json[key] = value
        return json

    @classmethod
    def from_json(cls, json):
        '''Deserialize from the CDP wire format; missing optionals become None.'''
        def opt(key, conv):
            return conv(json[key]) if key in json else None
        return cls(
            credential_id=str(json['credentialId']),
            is_resident_credential=bool(json['isResidentCredential']),
            private_key=str(json['privateKey']),
            sign_count=int(json['signCount']),
            rp_id=opt('rpId', str),
            user_handle=opt('userHandle', str),
            large_blob=opt('largeBlob', str),
            backup_eligibility=opt('backupEligibility', bool),
            backup_state=opt('backupState', bool),
        )
|
||||
|
||||
|
||||
def enable(
        enable_ui: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Enable the WebAuthn domain and start intercepting credential storage and
    retrieval with a virtual authenticator.

    :param enable_ui: *(Optional)* Whether to enable the WebAuthn user interface. Enabling the UI is recommended for debugging and demo purposes, as it is closer to the real experience. Disabling the UI is recommended for automated testing. Supported at the embedder's discretion if UI is available. Defaults to false.
    '''
    params: T_JSON_DICT = dict()
    if enable_ui is not None:
        params['enableUI'] = enable_ui
    yield {
        'method': 'WebAuthn.enable',
        'params': params,
    }
|
||||
|
||||
|
||||
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disable the WebAuthn domain.
    '''
    # No parameters and no result payload: just emit the command.
    yield {
        'method': 'WebAuthn.disable',
    }
|
||||
|
||||
|
||||
def add_virtual_authenticator(
        options: VirtualAuthenticatorOptions
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,AuthenticatorId]:
    '''
    Creates and adds a virtual authenticator.

    :param options: Configuration of the authenticator to create.
    :returns: Identifier of the new authenticator.
    '''
    response = yield {
        'method': 'WebAuthn.addVirtualAuthenticator',
        'params': {'options': options.to_json()},
    }
    return AuthenticatorId.from_json(response['authenticatorId'])
|
||||
|
||||
|
||||
def set_response_override_bits(
        authenticator_id: AuthenticatorId,
        is_bogus_signature: typing.Optional[bool] = None,
        is_bad_uv: typing.Optional[bool] = None,
        is_bad_up: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Resets parameters isBogusSignature, isBadUV, isBadUP to false if they are not present.

    :param authenticator_id:
    :param is_bogus_signature: *(Optional)* If isBogusSignature is set, overrides the signature in the authenticator response to be zero. Defaults to false.
    :param is_bad_uv: *(Optional)* If isBadUV is set, overrides the UV bit in the flags in the authenticator response to be zero. Defaults to false.
    :param is_bad_up: *(Optional)* If isBadUP is set, overrides the UP bit in the flags in the authenticator response to be zero. Defaults to false.
    '''
    params: T_JSON_DICT = dict()
    params['authenticatorId'] = authenticator_id.to_json()
    # Optional override bits are sent only when explicitly set.
    for key, value in (
        ('isBogusSignature', is_bogus_signature),
        ('isBadUV', is_bad_uv),
        ('isBadUP', is_bad_up),
    ):
        if value is not None:
            params[key] = value
    yield {
        'method': 'WebAuthn.setResponseOverrideBits',
        'params': params,
    }
|
||||
|
||||
|
||||
def remove_virtual_authenticator(
        authenticator_id: AuthenticatorId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Removes the given authenticator.

    :param authenticator_id: Authenticator to remove.
    '''
    yield {
        'method': 'WebAuthn.removeVirtualAuthenticator',
        'params': {'authenticatorId': authenticator_id.to_json()},
    }
|
||||
|
||||
|
||||
def add_credential(
        authenticator_id: AuthenticatorId,
        credential: Credential
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Adds the credential to the specified authenticator.

    :param authenticator_id: Target authenticator.
    :param credential: Credential to store.
    '''
    yield {
        'method': 'WebAuthn.addCredential',
        'params': {
            'authenticatorId': authenticator_id.to_json(),
            'credential': credential.to_json(),
        },
    }
|
||||
|
||||
|
||||
def get_credential(
        authenticator_id: AuthenticatorId,
        credential_id: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Credential]:
    '''
    Returns a single credential stored in the given virtual authenticator that
    matches the credential ID.

    :param authenticator_id: Authenticator to query.
    :param credential_id: ID of the credential to fetch.
    :returns: The matching credential.
    '''
    response = yield {
        'method': 'WebAuthn.getCredential',
        'params': {
            'authenticatorId': authenticator_id.to_json(),
            'credentialId': credential_id,
        },
    }
    return Credential.from_json(response['credential'])
|
||||
|
||||
|
||||
def get_credentials(
        authenticator_id: AuthenticatorId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Credential]]:
    '''
    Returns all the credentials stored in the given virtual authenticator.

    :param authenticator_id: Authenticator to query.
    :returns: All stored credentials.
    '''
    response = yield {
        'method': 'WebAuthn.getCredentials',
        'params': {'authenticatorId': authenticator_id.to_json()},
    }
    return [Credential.from_json(entry) for entry in response['credentials']]
|
||||
|
||||
|
||||
def remove_credential(
        authenticator_id: AuthenticatorId,
        credential_id: str
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Removes a credential from the authenticator.

    :param authenticator_id: Authenticator holding the credential.
    :param credential_id: ID of the credential to remove.
    '''
    yield {
        'method': 'WebAuthn.removeCredential',
        'params': {
            'authenticatorId': authenticator_id.to_json(),
            'credentialId': credential_id,
        },
    }
|
||||
|
||||
|
||||
def clear_credentials(
        authenticator_id: AuthenticatorId
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Clears all the credentials from the specified device.

    :param authenticator_id: Authenticator to clear.
    '''
    yield {
        'method': 'WebAuthn.clearCredentials',
        'params': {'authenticatorId': authenticator_id.to_json()},
    }
|
||||
|
||||
|
||||
def set_user_verified(
        authenticator_id: AuthenticatorId,
        is_user_verified: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sets whether User Verification succeeds or fails for an authenticator.
    The default is true.

    :param authenticator_id: Target authenticator.
    :param is_user_verified: Whether user verification should succeed.
    '''
    yield {
        'method': 'WebAuthn.setUserVerified',
        'params': {
            'authenticatorId': authenticator_id.to_json(),
            'isUserVerified': is_user_verified,
        },
    }
|
||||
|
||||
|
||||
def set_automatic_presence_simulation(
        authenticator_id: AuthenticatorId,
        enabled: bool
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Sets whether tests of user presence will succeed immediately (if true) or fail to resolve (if false) for an authenticator.
    The default is true.

    :param authenticator_id: Target authenticator.
    :param enabled: Whether presence tests resolve immediately.
    '''
    yield {
        'method': 'WebAuthn.setAutomaticPresenceSimulation',
        'params': {
            'authenticatorId': authenticator_id.to_json(),
            'enabled': enabled,
        },
    }
|
||||
|
||||
|
||||
def set_credential_properties(
        authenticator_id: AuthenticatorId,
        credential_id: str,
        backup_eligibility: typing.Optional[bool] = None,
        backup_state: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Allows setting credential properties.
    https://w3c.github.io/webauthn/#sctn-automation-set-credential-properties

    :param authenticator_id:
    :param credential_id:
    :param backup_eligibility: *(Optional)*
    :param backup_state: *(Optional)*
    '''
    params: T_JSON_DICT = dict()
    params['authenticatorId'] = authenticator_id.to_json()
    params['credentialId'] = credential_id
    # Optional properties are sent only when explicitly set.
    for key, value in (
        ('backupEligibility', backup_eligibility),
        ('backupState', backup_state),
    ):
        if value is not None:
            params[key] = value
    yield {
        'method': 'WebAuthn.setCredentialProperties',
        'params': params,
    }
|
||||
|
||||
|
||||
@event_class('WebAuthn.credentialAdded')
@dataclass
class CredentialAdded:
    '''Fired when a credential is added to an authenticator.'''
    #: Authenticator that received the credential.
    authenticator_id: AuthenticatorId
    #: The credential that was added.
    credential: Credential

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> CredentialAdded:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            AuthenticatorId.from_json(json['authenticatorId']),
            Credential.from_json(json['credential']),
        )
|
||||
|
||||
|
||||
@event_class('WebAuthn.credentialAsserted')
@dataclass
class CredentialAsserted:
    '''Fired when a credential is used in a webauthn assertion.'''
    #: Authenticator that performed the assertion.
    authenticator_id: AuthenticatorId
    #: The credential that was asserted.
    credential: Credential

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> CredentialAsserted:
        '''Decode the event payload from its CDP wire form.'''
        return cls(
            AuthenticatorId.from_json(json['authenticatorId']),
            Credential.from_json(json['credential']),
        )
|
Reference in New Issue
Block a user