Loading 3 billion rows into Doris at 500k rows per batch fails with the error below. How can this be optimized?
User class threw exception: java.lang.RuntimeException: Execute Spark task error
at org.apache.seatunnel.core.spark.command.SparkTaskExecuteCommand.execute(SparkTaskExecuteCommand.java:79)
at org.apache.seatunnel.core.base.Seatunnel.run(Seatunnel.java:39)
at org.apache.seatunnel.core.spark.SeatunnelSpark.main(SeatunnelSpark.java:32)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:694)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 0.0 failed 4 times, most recent failure: Lost task 2.3 in stage 0.0 (TID 89, slave27.offline.gitv.we, executor 5): java.io.IOException: Failed to load batch data on BE: http://10.58.130.41:8043/api/cmcc_dws/dws_livod_play_pb_stat_mm/_stream_load node.
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1.apply(DorisWriter.scala:107)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1.apply(DorisWriter.scala:95)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:121)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1405)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.doris.spark.exception.StreamLoadException: stream load error, load status:Fail, response:status: 200, resp msg: OK, resp content: {
"TxnId": 3395596,
"Label": "spark_streamload_20240708_204341_0cb82b1982164c5094ff2c473cb65a12",
"Comment": "",
"TwoPhaseCommit": "false",
"Status": "Fail",
"Message": "[INTERNAL_ERROR]cancelled: tablet error: tablet writer write failed, tablet_id=3620920, txn_id=3395596, err=[E-235]failed to init rowset builder. version count: 2053, exceed limit: 2000, tablet: 3620920, host: 10.58.130.41",
"NumberTotalRows": 20320,
"NumberLoadedRows": 20320,
"NumberFilteredRows": 0,
"NumberUnselectedRows": 0,
"LoadBytes": 75910799,
"LoadTimeMs": 12808,
"BeginTxnTimeMs": 0,
"StreamLoadPutTimeMs": 3,
"ReadDataTimeMs": 145,
"WriteDataTimeMs": 1304,
"CommitAndPublishTimeMs": 0
}
at org.apache.doris.spark.load.DorisStreamLoad.load(DorisStreamLoad.java:255)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$write$1.apply(DorisWriter.scala:71)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$write$1.apply(DorisWriter.scala:71)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1$$anonfun$1.apply$mcJ$sp(DorisWriter.scala:100)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1$$anonfun$1.apply(DorisWriter.scala:100)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1$$anonfun$1.apply(DorisWriter.scala:100)
at scala.util.Try$.apply(Try.scala:192)
at org.apache.doris.spark.sql.Utils$.retry(Utils.scala:182)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1$$anonfun$3$$anonfun$apply$1.apply(DorisWriter.scala:99)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1$$anonfun$3$$anonfun$apply$1.apply(DorisWriter.scala:99)
at org.apache.doris.spark.writer.DorisWriter$$anonfun$doWrite$1.apply(DorisWriter.scala:100)
... 13 more
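The failure that matters is in the Message field of the Stream Load response: Doris error -235, "failed to init rowset builder. version count: 2053, exceed limit: 2000". Every Stream Load creates a new rowset version on each tablet it touches, and background compaction merges those versions away. When loads arrive faster than compaction can keep up, a tablet's version count reaches the BE limit (max_tablet_version_num, default 2000) and further writes to it are rejected. Note that the connector issues one Stream Load per Spark partition (the foreachPartition frame in the trace), so a single 500k-row batch can fan out into many separate loads, and a table with many partitions or buckets spreads each batch even thinner, producing many small versions per tablet.

Three levers help, in rough order of preference: load less often with larger batches (bigger connector batch size, fewer write partitions, fewer buckets if the table is over-bucketed), give compaction more resources on the BEs, and only as a stopgap raise max_tablet_version_num, which merely delays the error if ingestion still outpaces compaction.

Below is a minimal sketch of the first lever using the Spark Doris Connector write API directly. The SeaTunnel sink wraps this connector, but its option keys differ across SeaTunnel versions, so treat this as illustrative: the source path, credentials, coalesce factor, and batch size are placeholders; the database and table names are taken from the Stream Load URL in the trace.

```scala
import org.apache.spark.sql.SparkSession

object DorisBulkLoad {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("doris-bulk-load").getOrCreate()

    // Hypothetical source; substitute the real upstream of the SeaTunnel job.
    val df = spark.read.parquet("hdfs:///path/to/source")

    df.coalesce(8) // fewer write partitions => fewer concurrent Stream Loads per batch
      .write
      .format("doris")
      .option("doris.fenodes", "fe_host:8030") // FE HTTP address (placeholder), not the BE address from the error
      .option("doris.table.identifier", "cmcc_dws.dws_livod_play_pb_stat_mm")
      .option("user", "root")      // placeholder credentials
      .option("password", "")
      .option("doris.sink.batch.size", "1000000") // larger batches => fewer, bigger rowset versions per tablet
      .option("doris.sink.max-retries", "3")
      .save()

    spark.stop()
  }
}
```

On the BE side, compaction tuning lives in be.conf. Parameter names and defaults vary across Doris releases, so verify each one against the docs for the version in use before applying; this is an assumption-laden example, not a recommended production setting:

```
# be.conf (apply via restart or the BE config update API, depending on version)
max_tablet_version_num = 5000        # stopgap only: raises the -235 threshold
compaction_task_num_per_disk = 4     # more concurrent compaction tasks per disk
max_cumu_compaction_threads = 10     # cumulative compaction thread pool size
```

Compaction progress for the failing tablet can be checked with `SHOW TABLET 3620920;` in a MySQL client, or (in recent versions) via the BE web endpoint `/api/compaction/show?tablet_id=3620920`, to confirm whether version counts drop after tuning.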