digraph G {
0 [labelType="html" label="<br><b>AdaptiveSparkPlan</b><br><br>"];
1 [labelType="html" label="<b>Execute InsertIntoHadoopFsRelationCommand</b><br><br>task commit time total (min, med, max (stageId: taskId))<br>8.5 s (203 ms, 266 ms, 763 ms (stage 448.0: task 3000))<br>number of written files: 25<br>job commit time: 7.0 s<br>number of output rows: 1,196<br>number of dynamic part: 0<br>written output: 83.2 KiB"];
2 [labelType="html" label="<br><b>WriteFiles</b><br><br>"];
subgraph cluster3 {
isCluster="true";
label="WholeStageCodegen (3)\n \nduration: total (min, med, max (stageId: taskId))\n1.0 s (22 ms, 26 ms, 219 ms (stage 448.0: task 3190))";
4 [labelType="html" label="<br><b>Project</b><br><br>"];
5 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 1,196"];
}
6 [labelType="html" label="<b>Window</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 448.0: task 2999))"];
7 [labelType="html" label="<b>WindowGroupLimit</b><br><br>number of output rows: 1,196"];
subgraph cluster8 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: total (min, med, max (stageId: taskId))\n1.1 s (24 ms, 29 ms, 221 ms (stage 448.0: task 3190))";
9 [labelType="html" label="<b>Sort</b><br><br>sort time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 448.0: task 2999))<br>peak memory total (min, med, max (stageId: taskId))<br>51.6 MiB (2.1 MiB, 2.1 MiB, 2.1 MiB (stage 448.0: task 2999))<br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 448.0: task 2999))"];
}
10 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 1,196<br>local merged chunks fetched: 0<br>shuffle write time: 2 ms<br>remote merged bytes read: 0.0 B<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration: 0 ms<br>remote merged blocks fetched: 0<br>records read: 1,196<br>local bytes read total (min, med, max (stageId: taskId))<br>50.7 KiB (1702.0 B, 2.0 KiB, 2.4 KiB (stage 448.0: task 3092))<br>fetch wait time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 448.0: task 2999))<br>remote bytes read: 0.0 B<br>merged fetch fallback count: 0<br>local blocks read: 25<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size: 112.1 KiB<br>local merged bytes read: 0.0 B<br>number of partitions: 25<br>remote reqs duration: 0 ms<br>remote bytes read to disk: 0.0 B<br>shuffle bytes written: 50.7 KiB"];
subgraph cluster11 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: 153 ms";
12 [labelType="html" label="<b>ColumnarToRow</b><br><br>number of output rows: 1,196<br>number of input batches: 1"];
}
13 [labelType="html" label="<b>Scan parquet </b><br><br>number of files read: 1<br>scan time: 140 ms<br>metadata time: 0 ms<br>size of files read: 37.4 KiB<br>number of output rows: 1,196"];
1->0;
2->1;
4->2;
5->4;
6->5;
7->6;
9->7;
10->9;
12->10;
13->12;
}
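The DOT source above is what the Spark UI's SQL tab renders as the plan graph: nodes 0-13 are the physical operators annotated with their accumulated task metrics, and the three clusters are the WholeStageCodegen stages. To inspect it outside the UI, the text can be rendered with the Python graphviz package; a minimal sketch, assuming the digraph block was saved verbatim as plan.dot (a hypothetical filename):

# Render the Spark UI plan graph offline; plan.dot is a hypothetical local copy
# of the digraph above.
from pathlib import Path
from graphviz import Source  # pip install graphviz (also needs the Graphviz binaries)

Source(Path("plan.dot").read_text()).render("spark_plan", format="svg", cleanup=True)
# Caveat: labelType="html" is a dagre-d3 (Spark UI) extension, so plain Graphviz
# keeps the <br>/<b> markup as literal label text instead of formatting it.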
AdaptiveSparkPlan isFinalPlan=true
+- Execute InsertIntoHadoopFsRelationCommand hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/product-plant-list/10000000096/shardId=0_1_10000000096, false, Parquet, [path=hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/product-plant-list/10000000096/shardId=0_1_10000000096], Overwrite, [product, plant, orderDateTime, planningStartDateTime]
   +- WriteFiles
      +- *(3) Project [product#41213, plant#41210, orderDateTime#41214, 2026-02-06 10:27:00 AS planningStartDateTime#41291]
         +- *(3) Filter (rank#41248 = 1)
            +- Window [row_number() windowspecdefinition(product#41213, plant#41210, orderDateTime#41214 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank#41248], [product#41213, plant#41210], [orderDateTime#41214 ASC NULLS FIRST]
               +- WindowGroupLimit [product#41213, plant#41210], [orderDateTime#41214 ASC NULLS FIRST], row_number(), 1, Final
                  +- *(2) Sort [product#41213 ASC NULLS FIRST, plant#41210 ASC NULLS FIRST, orderDateTime#41214 ASC NULLS FIRST], false, 0
                     +- Exchange hashpartitioning(product#41213, plant#41210, 25), REPARTITION_BY_NUM, [plan_id=12168]
                        +- *(1) ColumnarToRow
                           +- FileScan parquet [plant#41210,product#41213,orderDateTime#41214] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex(1 paths)[hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanac..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<plant:string,product:string,orderDateTime:timestamp>
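Read as a whole, this is the standard "first row per group" dedup: hash-repartition by (product, plant) into 25 partitions, sort each partition by the keys plus orderDateTime, number the rows per group, and keep only rank 1 before stamping a constant planningStartDateTime and overwriting the target directory. A minimal PySpark sketch that would compile to an equivalent plan; the session setup and paths are placeholders, not the service's actual code:

# Hypothetical reconstruction of the query behind this plan; src/dst are placeholders.
from pyspark.sql import SparkSession, Window
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
src = "hdlfs://<host>/out/order-proposal-boundary-snapshot/..."  # placeholder scan path
dst = "hdlfs://<host>/out/product-plant-list/..."                # placeholder write path

w = Window.partitionBy("product", "plant").orderBy(F.col("orderDateTime").asc_nulls_first())

(spark.read.parquet(src)
    .repartition(25, "product", "plant")         # Exchange: hashpartitioning(product, plant, 25), REPARTITION_BY_NUM
    .withColumn("rank", F.row_number().over(w))  # Window; the rank = 1 filter below lets Spark 3.5+ insert WindowGroupLimit
    .filter(F.col("rank") == 1)                  # Filter (rank = 1): earliest orderDateTime per (product, plant)
    .select("product", "plant", "orderDateTime",
            F.lit("2026-02-06 10:27:00").cast("timestamp").alias("planningStartDateTime"))
    .write.mode("overwrite").parquet(dst))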
== Physical Plan ==
AdaptiveSparkPlan (20)
+- == Final Plan ==
   Execute InsertIntoHadoopFsRelationCommand (11)
   +- WriteFiles (10)
      +- * Project (9)
         +- * Filter (8)
            +- Window (7)
               +- WindowGroupLimit (6)
                  +- * Sort (5)
                     +- ShuffleQueryStage (4), Statistics(sizeInBytes=112.1 KiB, rowCount=1.20E+3)
                        +- Exchange (3)
                           +- * ColumnarToRow (2)
                              +- Scan parquet (1)
+- == Initial Plan ==
   Execute InsertIntoHadoopFsRelationCommand (19)
   +- WriteFiles (18)
      +- Project (17)
         +- Filter (16)
            +- Window (15)
               +- WindowGroupLimit (14)
                  +- Sort (13)
                     +- Exchange (12)
                        +- Scan parquet (1)
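The numbered tree is Spark's formatted explain output. Because AQE re-plans at shuffle boundaries, it carries both the final plan (with the materialized ShuffleQueryStage and isFinalPlan=true) and the initial plan; the Exchange is re-instantiated during re-planning, which is why the final plan shows plan_id=12168 versus 12137 initially. The same output can be printed directly; a sketch, assuming df is the DataFrame built in the snippet above:

# Print the numbered operator tree plus the per-node details that follow below.
df.explain(mode="formatted")

# With AQE on (spark.sql.adaptive.enabled, default true since Spark 3.2),
# isFinalPlan=true only appears once the query has actually run; explain() on an
# unexecuted DataFrame reports isFinalPlan=false.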
(1) Scan parquet
Output [3]: [plant#41210, product#41213, orderDateTime#41214]
Batched: true
Location: InMemoryFileIndex [hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/order-proposal-boundary-snapshot/10000000096/shardId=0_1_10000000096]
ReadSchema: struct<plant:string,product:string,orderDateTime:timestamp>

(2) ColumnarToRow [codegen id : 1]
Input [3]: [plant#41210, product#41213, orderDateTime#41214]

(3) Exchange
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: hashpartitioning(product#41213, plant#41210, 25), REPARTITION_BY_NUM, [plan_id=12168]

(4) ShuffleQueryStage
Output [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: 0

(5) Sort [codegen id : 2]
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: [product#41213 ASC NULLS FIRST, plant#41210 ASC NULLS FIRST, orderDateTime#41214 ASC NULLS FIRST], false, 0

(6) WindowGroupLimit
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: [product#41213, plant#41210], [orderDateTime#41214 ASC NULLS FIRST], row_number(), 1, Final

(7) Window
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: [row_number() windowspecdefinition(product#41213, plant#41210, orderDateTime#41214 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank#41248], [product#41213, plant#41210], [orderDateTime#41214 ASC NULLS FIRST]

(8) Filter [codegen id : 3]
Input [4]: [plant#41210, product#41213, orderDateTime#41214, rank#41248]
Condition : (rank#41248 = 1)

(9) Project [codegen id : 3]
Output [4]: [product#41213, plant#41210, orderDateTime#41214, 2026-02-06 10:27:00 AS planningStartDateTime#41291]
Input [4]: [plant#41210, product#41213, orderDateTime#41214, rank#41248]

(10) WriteFiles
Input [4]: [product#41213, plant#41210, orderDateTime#41214, planningStartDateTime#41291]

(11) Execute InsertIntoHadoopFsRelationCommand
Input: []
Arguments: hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/product-plant-list/10000000096/shardId=0_1_10000000096, false, Parquet, [path=hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/product-plant-list/10000000096/shardId=0_1_10000000096], Overwrite, [product, plant, orderDateTime, planningStartDateTime]

(12) Exchange
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: hashpartitioning(product#41213, plant#41210, 25), REPARTITION_BY_NUM, [plan_id=12137]

(13) Sort
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: [product#41213 ASC NULLS FIRST, plant#41210 ASC NULLS FIRST, orderDateTime#41214 ASC NULLS FIRST], false, 0

(14) WindowGroupLimit
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: [product#41213, plant#41210], [orderDateTime#41214 ASC NULLS FIRST], row_number(), 1, Final

(15) Window
Input [3]: [plant#41210, product#41213, orderDateTime#41214]
Arguments: [row_number() windowspecdefinition(product#41213, plant#41210, orderDateTime#41214 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank#41248], [product#41213, plant#41210], [orderDateTime#41214 ASC NULLS FIRST]

(16) Filter
Input [4]: [plant#41210, product#41213, orderDateTime#41214, rank#41248]
Condition : (rank#41248 = 1)

(17) Project
Output [4]: [product#41213, plant#41210, orderDateTime#41214, 2026-02-06 10:27:00 AS planningStartDateTime#41291]
Input [4]: [plant#41210, product#41213, orderDateTime#41214, rank#41248]

(18) WriteFiles
Input [4]: [product#41213, plant#41210, orderDateTime#41214, planningStartDateTime#41291]

(19) Execute InsertIntoHadoopFsRelationCommand
Input: []
Arguments: hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/product-plant-list/10000000096/shardId=0_1_10000000096, false, Parquet, [path=hdlfs://2e93940d-4be8-4f12-830d-f0b8d392c03a.files.hdl.prod-eu20.hanacloud.ondemand.com:443/crp-order-qty-opt-service/out/product-plant-list/10000000096/shardId=0_1_10000000096], Overwrite, [product, plant, orderDateTime, planningStartDateTime]

(20) AdaptiveSparkPlan
Output: []
Arguments: isFinalPlan=true
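The metrics line up across the graph: 1,196 rows survive the Filter and Window, and the write produces 25 files, one per shuffle partition. A hypothetical post-run sanity check against the written output, reusing spark and dst from the sketch above:

# Hypothetical post-run checks; the expected count comes from this run's metrics.
out = spark.read.parquet(dst)
assert out.count() == 1196  # "number of output rows: 1,196"
assert out.groupBy("product", "plant").count().filter("count > 1").isEmpty()  # one row per key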