PySpark DataFrame UDF exception handling
I have written a UDF in Python to be used in Spark. The function takes a date (as a string, e.g. "2017-01-06") and an array of date strings (e.g. ["2017-01-26", "2017-02-26", "2017-04-17"]) and returns the number of days since the most recent past date. The UDF is:
def findClosestPreviousDate(currdate, date_list):
    date_format = "%Y-%m-%d"
    currdate = datetime.datetime.strptime(currdate, date_format)
    result = currdate
    date_list = [datetime.datetime.strptime(x, date_format) for x in date_list if x != None]
    lowestdiff = 10000
    for dt in date_list:
        if(dt >= currdate):
            continue
        delta = currdate-dt
        diff = delta.days
        if(diff < lowestdiff):
            lowestdiff = diff
            result = dt
    dlt = currdate-result
    return dlt.days

findClosestPreviousDateUdf = udf(findClosestPreviousDate, StringType())
I call it in the following manner:
findClosestPreviousDateUdf = udf(findClosestPreviousDate,StringType())
grouped_extend_df2 = grouped_extend_email_rec.withColumn('recency_eng', func.when(size(col("activity_arr")) > 0, findClosestPreviousDateUdf("expanded_datestr", "activity_arr")).otherwise(0))
Even after I remove all nulls from the column "activity_arr" I keep getting this NoneType error. I also tried applying exception handling inside the function (still the same).
Do we have a better way of catching erroneous records at runtime from inside a UDF (perhaps using accumulators or the like; I have seen a few people attempt the same in Scala)?
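Something along these lines is what I have in mind, but only as a rough sketch (the wrapper name is mine, spark is assumed to be the active SparkSession, and accumulator updates from tasks are not guaranteed exactly-once):

from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

bad_records = spark.sparkContext.accumulator(0)   # incremented on executors, read on the driver

def findClosestPreviousDateSafe(currdate, date_list):
    try:
        return findClosestPreviousDate(currdate, date_list)
    except Exception:
        bad_records.add(1)   # count the failing row instead of failing the whole task
        return None          # null output marks the bad record

# IntegerType here, since the function returns a day count
findClosestPreviousDateSafeUdf = udf(findClosestPreviousDateSafe, IntegerType())

# after an action (e.g. grouped_extend_df2.count()), bad_records.value holds the failure count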
Error:
---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
in ()
----> 1 grouped_extend_df2.show()

/usr/lib/spark/python/pyspark/sql/dataframe.pyc in show(self, n, truncate)
    334         """
    335         if isinstance(truncate, bool) and truncate:
--> 336             print(self._jdf.showString(n, 20))
    337         else:
    338             print(self._jdf.showString(n, int(truncate)))

/usr/lib/spark/python/lib/py4j-0.10.4-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1131         answer = self.gateway_client.send_command(command)
   1132         return_value = get_return_value(
-> 1133             answer, self.gateway_client, self.target_id, self.name)
   1134
   1135         for temp_arg in temp_args:

/usr/lib/spark/python/pyspark/sql/utils.pyc in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

/usr/lib/spark/python/lib/py4j-0.10.4-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    317                 raise Py4JJavaError(
    318                     "An error occurred while calling {0}{1}{2}.\n".
--> 319                     format(target_id, ".", name), value)
    320             else:
    321                 raise Py4JError(

Py4JJavaError: An error occurred while calling o1111.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 315.0 failed 1 times, most recent failure: Lost task 0.0 in stage 315.0 (TID 18390, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 177, in main
    process()
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 172, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 104, in
    func = lambda _, it: map(mapper, it)
  File "", line 1, in
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 71, in
    return lambda *a: f(*a)
  File "", line 5, in findClosestPreviousDate
TypeError: 'NoneType' object is not iterable

    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
    at org.apache.spark.api.python.PythonRunner$$anon$1.(PythonRDD.scala:234)
    at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
    at org.apache.spark.sql.execution.python.BatchEvalPythonExec$$anonfun$doExecute$1.apply(BatchEvalPythonExec.scala:144)
    at org.apache.spark.sql.execution.python.BatchEvalPythonExec$$anonfun$doExecute$1.apply(BatchEvalPythonExec.scala:87)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1517)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1505)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1504)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1504)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1732)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1687)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1676)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2029)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2050)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2069)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:336)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:2861)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2150)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2150)
    at org.apache.spark.sql.Dataset$$anonfun$55.apply(Dataset.scala:2842)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2841)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:2150)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:2363)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:241)
    at sun.reflect.GeneratedMethodAccessor237.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:280)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:214)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 177, in main
    process()
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 172, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 104, in
    func = lambda _, it: map(mapper, it)
  File "", line 1, in
  File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 71, in
    return lambda *a: f(*a)
  File "", line 5, in findClosestPreviousDate
TypeError: 'NoneType' object is not iterable

    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
    at org.apache.spark.api.python.PythonRunner$$anon$1.(PythonRDD.scala:234)
    at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
    at org.apache.spark.sql.execution.python.BatchEvalPythonExec$$anonfun$doExecute$1.apply(BatchEvalPythonExec.scala:144)
    at org.apache.spark.sql.execution.python.BatchEvalPythonExec$$anonfun$doExecute$1.apply(BatchEvalPythonExec.scala:87)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:797)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    ... 1 more
I tried your UDF, but it always returns 0 (int).
dlt = currdate-result # result and currdate are same
return dlt.days # days is int type
But while creating the UDF you have specified StringType:
findClosestPreviousDateUdf = udf(findClosestPreviousDate,StringType())
So I have modified the findClosestPreviousDate function as below; please change it further if necessary.
>>> in_dates = ['2017-01-26', '2017-02-26', '2017-04-17']
>>>
>>> def findClosestPreviousDate(currdate, date_list=in_dates):
...     date_format = "%Y-%m-%d"
...     currdate = datetime.datetime.strptime(currdate, date_format)
...     date_list = [datetime.datetime.strptime(x, date_format) for x in date_list if x != None]
...     diff = map(lambda dt: (currdate - dt).days, date_list)
...     closestDate = min(filter(lambda days_diff: days_diff <= 0, diff))
...     return closestDate if closestDate else 0
...
>>> findClosestPreviousDate('2017-01-06')
-101
Also make the UDF's return type IntegerType. With these modifications the code works, but please verify that the changes are correct. A PySpark UDF call only accepts Column arguments, so a plain Python list cannot be passed in directly; there is a workaround, see PySpark - Pass list as parameter to UDF.
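For reference, a rough sketch of that workaround is to wrap the list into an array of literal columns and pass it as the second argument (the UDF name here is mine, and df is the toy DataFrame used in the session below):

from pyspark.sql.functions import array, lit, udf
from pyspark.sql.types import IntegerType

in_dates = ['2017-01-26', '2017-02-26', '2017-04-17']

# build an array<string> column out of the Python list and hand it to the UDF
findClosestPreviousDateUDF2 = udf(findClosestPreviousDate, IntegerType())
df.withColumn('closest_date',
              findClosestPreviousDateUDF2(df['date'],
                                          array(*[lit(d) for d in in_dates]))).show()

In the session below I simply bind the list as a default argument instead.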
>>> df.show()
+----------+
| date|
+----------+
|2017-01-06|
|2017-01-08|
+----------+
>>>
>>> in_dates = ['2017-01-26', '2017-02-26', '2017-04-17']
>>> def findClosestPreviousDate(currdate, date_list=in_dates):
...     date_format = "%Y-%m-%d"
...     currdate = datetime.datetime.strptime(currdate, date_format)
...     date_list = [datetime.datetime.strptime(x, date_format) for x in date_list if x != None]
...     diff = map(lambda dt: (currdate - dt).days, date_list)
...     closestDate = min(filter(lambda days_diff: days_diff <= 0, diff))
...     return closestDate if closestDate else 0
...
>>> findClosestPreviousDate('2017-01-06')
-101
>>>
>>> from pyspark.sql.types import IntegerType
>>> findClosestPreviousDateUDF = udf(findClosestPreviousDate, IntegerType())
>>> df.withColumn('closest_date', findClosestPreviousDateUDF(df['date'])).show()
+----------+------------+
| date|closest_date|
+----------+------------+
|2017-01-06| -101|
|2017-01-08| -99|
+----------+------------+
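Also, since your original error came from a null date_list, you may want a small guard on top of this (just a sketch, with a wrapper name of my own; I have not run it against your data):

>>> def findClosestPreviousDateSafe(currdate, date_list=in_dates):
...     if currdate is None or date_list is None:   # nulls reaching the UDF would otherwise raise TypeError
...         return None
...     return findClosestPreviousDate(currdate, date_list)
...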
Hope this helps!
I think I have figured out the problem. Here is my modified UDF.
def findClosestPreviousDate(currdate, date_str):
    date_format = "%Y-%m-%d"
    currdate = datetime.datetime.strptime(currdate, date_format)
    date_list = ''
    result = currdate
    if date_str is None:
        return date_str
    else:
        date_list = date_str.split('|')
    date_list = [datetime.datetime.strptime(x, date_format) for x in date_list if x != None]
    lowestdiff = 10000
    for dt in date_list:
        if(dt >= currdate):
            continue
        delta = currdate-dt
        diff = delta.days
        if(diff < lowestdiff):
            lowestdiff = diff
            result = dt
    dlt = currdate-result
    return dlt.days
The NoneType error was caused by nulls reaching the UDF as arguments, which I already knew. What I wanted to know is why the nulls were not filtered out when I used the isNotNull() function.
I tried
findClosestPreviousDateUdf = udf(findClosestPreviousDate,StringType())
grouped_extend_df2 = grouped_extend_email_rec.withColumn('recency_eng', func.when(size(col("activity_arr")) > 0, findClosestPreviousDateUdf("expanded_datestr", "activity_arr")).otherwise(0))
and
findClosestPreviousDateUdf = udf(findClosestPreviousDate,StringType())
grouped_extend_df2 = grouped_extend_email_rec.withColumn('recency_eng', func.when(col("activity_arr").isNotNull(), findClosestPreviousDateUdf("expanded_datestr", "activity_arr")).otherwise(0))
But when I handled the NoneType inside the Python function findClosestPreviousDate() itself, as below,

if date_str is None:
    return date_str
else:
    date_list = date_str.split('|')

it worked.
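For completeness, the working setup now looks roughly like this (a sketch only: IntegerType follows the suggestion in the other answer since the function returns a day count, and 'activity_datestr' is a placeholder for the '|'-delimited string column the activity dates end up in, however that column is produced):

from pyspark.sql import functions as func
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

findClosestPreviousDateUdf = udf(findClosestPreviousDate, IntegerType())

# 'activity_datestr' stands in for the pipe-delimited date string column
grouped_extend_df2 = grouped_extend_email_rec.withColumn(
    'recency_eng',
    findClosestPreviousDateUdf(func.col('expanded_datestr'), func.col('activity_datestr')))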