diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 4e6a02152..7f22e57f3 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -786,12 +786,24 @@ class ClusterDetails: - Name: """ + disk_spec: Optional[dict] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + docker_image: Optional[DockerImage] = None driver: Optional[SparkNode] = None """Node on which the Spark driver resides. The driver node contains the Spark master and the Databricks application that manages the per-notebook Spark REPLs.""" + driver_healthy: Optional[bool] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + + driver_instance_source: Optional[dict] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + driver_instance_pool_id: Optional[str] = None """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not assigned.""" @@ -800,6 +812,10 @@ class ClusterDetails: """The node type of the Spark driver. Note that this field is optional; if unset, the driver node type will be set as the same value as `node_type_id` defined above.""" + effective_spark_version: Optional[str] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space. 
This feature requires specific AWS @@ -820,13 +836,25 @@ class ClusterDetails: scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `//init_scripts`.""" + init_scripts_safe_mode: Optional[int] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + instance_pool_id: Optional[str] = None """The optional ID of the instance pool to which the cluster belongs.""" + instance_source: Optional[dict] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + jdbc_port: Optional[int] = None """Port on which Spark JDBC server is listening, in the driver nod. No service will be listeningon on this port in executor nodes.""" + last_activity_time: Optional[int] = None + """[PROD-2198] An APC attribute only. This field is missing in the API docs and the unforked databricks + sdk so it needed to be added here""" + last_restarted_time: Optional[int] = None """the timestamp that the cluster was started/restarted""" @@ -3141,6 +3169,8 @@ class EventDetailsCause(Enum): class EventType(Enum): + # [PROD-2198] Test data in the backend has an event type that was missing here + ADD_NODES_FAILED = 'ADD_NODES_FAILED' AUTOSCALING_STATS_REPORT = 'AUTOSCALING_STATS_REPORT' CREATING = 'CREATING' DBFS_DOWN = 'DBFS_DOWN' diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index f96d7dd75..808e785c0 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -3408,7 +3408,8 @@ def from_dict(cls, d: Dict[str, any]) -> RunState: class RunTask: """Used when outputting a child run, in GetRun or ListRuns.""" - task_key: str + # [PROD-2198] adding the default here is necessary to process legacy cluster reports + task_key: Optional[str] = None """A unique name for the task. 
This field is used to refer to this task from other tasks. This field is required and must be unique within its parent job. On Update or Reset, this field is used to reference the tasks to be updated or reset."""