more notebook updates · codeaudit/pipeline-1@9cb99fc · GitHub

Commit 9cb99fc

more notebook updates
1 parent af20f95 commit 9cb99fc

File tree

16 files changed: +337 and -227 lines changed


config/.vim/.netrwhist

Lines changed: 1 addition & 10 deletions
@@ -1,11 +1,2 @@
 let g:netrw_dirhistmax =10
-let g:netrw_dirhist_cnt =9
-let g:netrw_dirhist_1='/root/pipeline/config/logstash'
-let g:netrw_dirhist_2='/root/pipeline/flows/flowfile_repository'
-let g:netrw_dirhist_3='/root/pipeline/myapps/ml/src/main/scala/com/advancedspark/ml/graph'
-let g:netrw_dirhist_4='/root/pipeline/config/zeppelin'
-let g:netrw_dirhist_5='/root/pipeline/myapps/ml/src/main/scala/com/advancedspark/ml/graph'
-let g:netrw_dirhist_6='/root/spark-1.6.0-bin-fluxcapacitor/tachyon'
-let g:netrw_dirhist_7='/root/spark-1.6.0-bin-fluxcapacitor/tachyon/bin'
-let g:netrw_dirhist_8='/root/pipeline/myapps/streaming'
-let g:netrw_dirhist_9='/root/pipeline/myapps/streaming/src/main/scala/com/advancedspark/streaming/rating/approx'
+let g:netrw_dirhist_cnt =0

config/zeppelin/interpreter.json

Lines changed: 9 additions & 4 deletions
@@ -28,9 +28,9 @@
       "spark.app.name": "Zeppelin",
       "spark.executor.memory": "2g",
       "zeppelin.spark.useHiveContext": "true",
-      "spark.home": "/root/spark-1.6.0-bin-fluxcapacitor",
-      "zeppelin.spark.concurrentSQL": "false",
       "args": "",
+      "zeppelin.spark.concurrentSQL": "false",
+      "spark.home": "/root/spark-1.6.0-bin-fluxcapacitor",
       "zeppelin.pyspark.python": "python",
       "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;"
     },
@@ -64,8 +64,8 @@
       "default.password": "",
       "default.user": "hive",
       "hive.hiveserver2.url": "jdbc:hive2://127.0.0.1:10000",
-      "default.driver": "org.apache.hive.jdbc.HiveDriver",
       "default.url": "jdbc:hive2://127.0.0.1:10000",
+      "default.driver": "org.apache.hive.jdbc.HiveDriver",
       "common.max_count": "1000",
       "hive.hiveserver2.password": "",
       "hive.hiveserver2.user": "hiveuser"
@@ -560,6 +560,11 @@
       "2AS9P7JSA",
       "2ARR8UZDJ",
       "2AR33ZMZJ"
+    ],
+    "2BCGC211S": [
+      "2AS9P7JSA",
+      "2ARR8UZDJ",
+      "2AR33ZMZJ"
     ]
   }
-}
+}

data_persist/zeppelin/2ASEWJ19K/note.json

Lines changed: 2 additions & 2 deletions
@@ -149,11 +149,11 @@
     {
       "title": "Extract and Enrich the Top Influencers",
       "text": "// Get the Top 10 Influencers\nval topInfluencers \u003d pageRank.vertices.top(5)(Ordering.by(rank \u003d\u003e rank._2))\nval topInfluencersDF \u003d sc.parallelize(topInfluencers).toDF(\"itemId\", \"rank\")\n\nval enrichedTopInfluencersDF \u003d topInfluencersDF.join(usersDF, $\"id\" \u003d\u003d\u003d $\"itemId\")\n .select($\"id\", $\"name\", $\"rank\")\n .sort($\"rank\" desc)\n \nz.show(enrichedTopInfluencersDF)",
-      "dateUpdated": "Jan 16, 2016 5:23:57 AM",
+      "dateUpdated": "Feb 11, 2016 9:27:12 PM",
       "config": {
         "colWidth": 12.0,
         "graph": {
-          "mode": "table",
+          "mode": "pieChart",
           "height": 300.0,
           "optionOpen": false,
           "keys": [

data_persist/zeppelin/2AUUDPT56/note.json

Lines changed: 12 additions & 0 deletions
@@ -192,6 +192,18 @@
       "status": "FINISHED",
       "progressUpdateIntervalMs": 500
     },
+    {
+      "config": {},
+      "settings": {
+        "params": {},
+        "forms": {}
+      },
+      "jobName": "paragraph_1455251808245_-631760587",
+      "id": "20160212-043648_845375799",
+      "dateCreated": "Feb 12, 2016 4:36:48 AM",
+      "status": "READY",
+      "progressUpdateIntervalMs": 500
+    },
     {
       "title": "TODOOOOO: Distribution of Ratings",
       "text": "z.show(joinedDF.describe(\"count\"))",

data_persist/zeppelin/2AUYFSKXN/note.json

Lines changed: 33 additions & 33 deletions
@@ -3,7 +3,7 @@
     {
       "title": "Collaborative Filtering: Alternating Least Squares Matrix Factorization",
       "text": "%md ![Alternating Least Squares - Matrix Factorization](https://raw.githubusercontent.com/cfregly/spark-after-dark/master/img/ALS.png)",
-      "dateUpdated": "Feb 4, 2016 3:14:56 AM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -32,14 +32,14 @@
         "msg": "\u003cp\u003e\u003cimg src\u003d\"https://raw.githubusercontent.com/cfregly/spark-after-dark/master/img/ALS.png\" alt\u003d\"Alternating Least Squares - Matrix Factorization\" /\u003e\u003c/p\u003e\n"
       },
       "dateCreated": "Jul 4, 2015 2:49:13 AM",
-      "dateStarted": "Jan 16, 2016 4:40:54 PM",
-      "dateFinished": "Jan 16, 2016 4:40:54 PM",
+      "dateStarted": "Feb 12, 2016 4:38:33 AM",
+      "dateFinished": "Feb 12, 2016 4:38:33 AM",
       "status": "FINISHED",
       "progressUpdateIntervalMs": 500
     },
     {
       "text": "%md ![User-to-Item Similarity with Facebook](http://fluxcapacitor.com/img/netflix-facebook-integration.png)",
-      "dateUpdated": "Feb 4, 2016 3:15:46 AM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -66,15 +66,15 @@
         "msg": "\u003cp\u003e\u003cimg src\u003d\"http://fluxcapacitor.com/img/netflix-facebook-integration.png\" alt\u003d\"User-to-Item Similarity with Facebook\" /\u003e\u003c/p\u003e\n"
       },
       "dateCreated": "Feb 4, 2016 3:14:53 AM",
-      "dateStarted": "Feb 4, 2016 3:15:46 AM",
-      "dateFinished": "Feb 4, 2016 3:15:46 AM",
+      "dateStarted": "Feb 12, 2016 4:38:33 AM",
+      "dateFinished": "Feb 12, 2016 4:38:33 AM",
       "status": "FINISHED",
       "progressUpdateIntervalMs": 500
     },
     {
       "title": "Train The Model Using The Historical Training Split Of The Historical Data",
       "text": "import org.apache.spark.ml.recommendation.ALS\n\nval rank \u003d 10\nval maxIterations \u003d 20\nval convergenceThreshold \u003d 0.01\n\nval als \u003d new ALS()\n .setRank(rank)\n .setRegParam(convergenceThreshold)\n .setUserCol(\"userId\")\n .setItemCol(\"itemId\")\n .setRatingCol(\"rating\")\n\nval model \u003d als.fit(itemRatingsDF)\n\nmodel.setPredictionCol(\"confidence\")",
-      "dateUpdated": "Jan 16, 2016 4:40:54 PM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -98,20 +98,20 @@
       "jobName": "paragraph_1435978256373_-160526409",
       "id": "20150704-025056_169923529",
       "result": {
-        "code": "SUCCESS",
+        "code": "ERROR",
         "type": "TEXT",
-        "msg": "import org.apache.spark.ml.recommendation.ALS\nrank: Int \u003d 10\nmaxIterations: Int \u003d 20\nconvergenceThreshold: Double \u003d 0.01\nals: org.apache.spark.ml.recommendation.ALS \u003d als_9a2aa41f1d28\nmodel: org.apache.spark.ml.recommendation.ALSModel \u003d als_9a2aa41f1d28\nres29: model.type \u003d als_9a2aa41f1d28\n"
+        "msg": "import org.apache.spark.ml.recommendation.ALS\nrank: Int \u003d 10\nmaxIterations: Int \u003d 20\nconvergenceThreshold: Double \u003d 0.01\nals: org.apache.spark.ml.recommendation.ALS \u003d als_0713aff1e6f6\n\u003cconsole\u003e:35: error: not found: value itemRatingsDF\n val model \u003d als.fit(itemRatingsDF)\n ^\n"
       },
       "dateCreated": "Jul 4, 2015 2:50:56 AM",
-      "dateStarted": "Jan 16, 2016 4:40:54 PM",
-      "dateFinished": "Jan 16, 2016 4:40:58 PM",
-      "status": "FINISHED",
+      "dateStarted": "Feb 12, 2016 4:38:33 AM",
+      "dateFinished": "Feb 12, 2016 4:38:33 AM",
+      "status": "ERROR",
       "progressUpdateIntervalMs": 500
     },
     {
       "title": "Generate Personalized Recommendations For Each Distinct User",
       "text": "val recommendationsDF \u003d model.transform(itemRatingsDF.select($\"userId\", $\"itemId\"))\n\nval enrichedRecommendationsDF \u003d \n recommendationsDF.join(itemsDF, $\"itemId\" \u003d\u003d\u003d $\"id\")\n .select($\"userId\", $\"itemId\", $\"title\", $\"description\", $\"tags\", $\"img\", $\"confidence\")\n .sort($\"userId\", $\"confidence\" desc)",
-      "dateUpdated": "Jan 16, 2016 4:40:54 PM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -161,17 +161,17 @@
       "result": {
         "code": "ERROR",
         "type": "TEXT",
-        "msg": "java.lang.IllegalArgumentException: Field \"lyrics\" does not exist.\n\tat org.apache.spark.sql.types.StructType$$anonfun$apply$1.apply(StructType.scala:212)\n\tat org.apache.spark.sql.types.StructType$$anonfun$apply$1.apply(StructType.scala:212)\n\tat scala.collection.MapLike$class.getOrElse(MapLike.scala:128)\n\tat scala.collection.AbstractMap.getOrElse(Map.scala:58)\n\tat org.apache.spark.sql.types.StructType.apply(StructType.scala:211)\n\tat org.apache.spark.ml.UnaryTransformer.transformSchema(Transformer.scala:106)\n\tat org.apache.spark.ml.PipelineModel$$anonfun$transformSchema$5.apply(Pipeline.scala:301)\n\tat org.apache.spark.ml.PipelineModel$$anonfun$transformSchema$5.apply(Pipeline.scala:301)\n\tat scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:51)\n\tat scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:60)\n\tat scala.collection.mutable.ArrayOps$ofRef.foldLeft(ArrayOps.scala:108)\n\tat org.apache.spark.ml.PipelineModel.transformSchema(Pipeline.scala:301)\n\tat org.apache.spark.ml.PipelineStage.transformSchema(Pipeline.scala:68)\n\tat org.apache.spark.ml.PipelineModel.transform(Pipeline.scala:296)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:71)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:76)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:78)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:80)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:82)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:84)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:86)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:88)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:90)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:92)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:94)\n\tat $iwC$$iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:96)\n\tat $iwC$$iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:98)\n\tat $iwC$$iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:100)\n\tat $iwC$$iwC.\u003cinit\u003e(\u003cconsole\u003e:102)\n\tat $iwC.\u003cinit\u003e(\u003cconsole\u003e:104)\n\tat \u003cinit\u003e(\u003cconsole\u003e:106)\n\tat .\u003cinit\u003e(\u003cconsole\u003e:110)\n\tat .\u003cclinit\u003e(\u003cconsole\u003e)\n\tat .\u003cinit\u003e(\u003cconsole\u003e:7)\n\tat .\u003cclinit\u003e(\u003cconsole\u003e)\n\tat $print(\u003cconsole\u003e)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat java.lang.reflect.Method.invoke(Method.java:497)\n\tat org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)\n\tat org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)\n\tat org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)\n\tat org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)\n\tat org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)\n\tat org.apache.zeppelin.spark.SparkInterpreter.interpretInput(SparkInterpreter.java:709)\n\tat org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:674)\n\tat org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:667)\n\tat org.apache.zeppelin.interpreter.ClassloaderInterpreter.interpret(ClassloaderInterpreter.java:57)\n\tat org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:93)\n\tat org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:300)\n\tat org.apache.zeppelin.scheduler.Job.run(Job.java:169)\n\tat org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:134)\n\tat java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)\n\tat java.util.concurrent.FutureTask.run(FutureTask.java:266)\n\tat java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)\n\tat java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)\n\tat java.lang.Thread.run(Thread.java:745)\n\n"
+        "msg": "\u003cconsole\u003e:28: error: not found: value model\n val recommendationsDF \u003d model.transform(itemRatingsDF.select($\"userId\", $\"itemId\"))\n ^\n"
       },
       "dateCreated": "Jul 4, 2015 2:51:32 AM",
-      "dateStarted": "Jan 16, 2016 4:40:54 PM",
-      "dateFinished": "Jan 16, 2016 4:40:59 PM",
-      "status": "FINISHED",
+      "dateStarted": "Feb 12, 2016 4:38:33 AM",
+      "dateFinished": "Feb 12, 2016 4:38:33 AM",
+      "status": "ERROR",
       "progressUpdateIntervalMs": 500
     },
     {
       "text": "z.show(enrichedRecommendationsDF.select($\"userId\", $\"itemId\", $\"confidence\", $\"title\", $\"img\").limit(10))",
-      "dateUpdated": "Jan 16, 2016 4:40:54 PM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -216,19 +216,19 @@
       "jobName": "paragraph_1443377582412_2075344434",
       "id": "20150927-181302_1140885708",
       "result": {
-        "code": "SUCCESS",
-        "type": "TABLE",
-        "msg": "userId\titemId\tconfidence\ttitle\timg\n22\t43\t0.9967797\tJava\timg/software/java.png\n22\t66\t0.99488777\tRedshift\timg/software/redshift.png\n22\t49\t0.99466026\tAmazon Web Services\timg/software/aws.png\n22\t77\t0.9943304\tS3\timg/software/s3.png\n22\t52\t0.9943008\tJSON\timg/software/json.png\n22\t21\t0.99397665\tElasticSearch\timg/software/elasticsearch.png\n22\t64\t0.9937968\tCSV\timg/software/csv.png\n22\t39\t0.99280584\tTableau\timg/software/tableau.png\n22\t68\t0.9927822\tDynamoDB\timg/software/dynamodb.png\n22\t60\t0.9926167\tMySQL\timg/software/mysql.png\n"
+        "code": "ERROR",
+        "type": "TEXT",
+        "msg": "\u003cconsole\u003e:31: error: not found: value enrichedRecommendationsDF\n z.show(enrichedRecommendationsDF.select($\"userId\", $\"itemId\", $\"confidence\", $\"title\", $\"img\").limit(10))\n ^\n"
       },
       "dateCreated": "Sep 27, 2015 6:13:02 PM",
-      "dateStarted": "Jan 16, 2016 4:40:59 PM",
-      "dateFinished": "Jan 16, 2016 4:41:04 PM",
-      "status": "FINISHED",
+      "dateStarted": "Feb 12, 2016 4:38:33 AM",
+      "dateFinished": "Feb 12, 2016 4:38:33 AM",
+      "status": "ERROR",
       "progressUpdateIntervalMs": 500
     },
     {
       "text": "import org.elasticsearch.spark.sql._ \nimport org.apache.spark.sql.SaveMode\n\nval esConfig \u003d Map(\"pushdown\" -\u003e \"true\", \"es.nodes\" -\u003e \"127.0.0.1\", \"es.port\" -\u003e \"9200\")\nenrichedRecommendationsDF.write.format(\"org.elasticsearch.spark.sql\").mode(SaveMode.Overwrite).options(esConfig)\n .save(\"advancedspark/personalized-als\")",
-      "dateUpdated": "Jan 16, 2016 4:40:54 PM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -251,18 +251,18 @@
       "jobName": "paragraph_1438113388648_-491234562",
       "id": "20150728-195628_1365871289",
       "result": {
-        "code": "SUCCESS",
+        "code": "ERROR",
         "type": "TEXT",
-        "msg": "import org.elasticsearch.spark.sql._\nimport org.apache.spark.sql.SaveMode\nesConfig: scala.collection.immutable.Map[String,String] \u003d Map(pushdown -\u003e true, es.nodes -\u003e 127.0.0.1, es.port -\u003e 9200)\n"
+        "msg": "import org.elasticsearch.spark.sql._\nimport org.apache.spark.sql.SaveMode\nesConfig: scala.collection.immutable.Map[String,String] \u003d Map(pushdown -\u003e true, es.nodes -\u003e 127.0.0.1, es.port -\u003e 9200)\n\u003cconsole\u003e:35: error: not found: value enrichedRecommendationsDF\n enrichedRecommendationsDF.write.format(\"org.elasticsearch.spark.sql\").mode(SaveMode.Overwrite).options(esConfig)\n ^\n"
       },
       "dateCreated": "Jul 28, 2015 7:56:28 PM",
-      "dateStarted": "Jan 16, 2016 4:40:59 PM",
-      "dateFinished": "Jan 16, 2016 4:41:27 PM",
-      "status": "FINISHED",
+      "dateStarted": "Feb 12, 2016 4:38:33 AM",
+      "dateFinished": "Feb 12, 2016 4:38:34 AM",
+      "status": "ERROR",
       "progressUpdateIntervalMs": 500
     },
     {
-      "dateUpdated": "Jan 16, 2016 4:40:54 PM",
+      "dateUpdated": "Feb 12, 2016 4:38:33 AM",
       "config": {
         "colWidth": 12.0,
         "graph": {
@@ -288,8 +288,8 @@
         "type": "TEXT"
       },
       "dateCreated": "Dec 25, 2015 7:08:18 PM",
-      "dateStarted": "Jan 16, 2016 4:41:04 PM",
-      "dateFinished": "Jan 16, 2016 4:41:27 PM",
+      "dateStarted": "Feb 12, 2016 4:38:34 AM",
+      "dateFinished": "Feb 12, 2016 4:38:34 AM",
       "status": "FINISHED",
       "progressUpdateIntervalMs": 500
     }
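
The result changes in this note all trace back to one failure: in the Feb 12 run, itemRatingsDF is no longer defined, so als.fit errors out, and every downstream paragraph then fails on the missing model and enrichedRecommendationsDF. For reference, a sketch of the intended end-to-end flow assembled from the paragraph sources above; the inline itemRatingsDF rows and the itemsDF columns are hypothetical stand-ins for data the real notebook builds elsewhere:

import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.sql.SaveMode
import org.elasticsearch.spark.sql._

// Hypothetical stand-in for the missing itemRatingsDF; the column names
// match what the ALS estimator below is configured to read.
val itemRatingsDF = sc.parallelize(Seq(
  (1, 10, 4.0), (1, 20, 2.0), (2, 10, 5.0)
)).toDF("userId", "itemId", "rating")

// Same hyperparameters as the paragraph (note: its maxIterations val is
// never passed to the estimator, and regParam is regularization strength,
// despite being named convergenceThreshold in the note).
val als = new ALS()
  .setRank(10)
  .setRegParam(0.01)
  .setUserCol("userId")
  .setItemCol("itemId")
  .setRatingCol("rating")

val model = als.fit(itemRatingsDF)   // the step that now fails with "not found: value itemRatingsDF"
model.setPredictionCol("confidence")

// Score the (user, item) pairs and enrich with item metadata; itemsDF is
// assumed to carry id, title, description, tags, img as the note references.
val recommendationsDF = model.transform(itemRatingsDF.select($"userId", $"itemId"))
val enrichedRecommendationsDF = recommendationsDF.join(itemsDF, $"itemId" === $"id")
  .select($"userId", $"itemId", $"title", $"description", $"tags", $"img", $"confidence")
  .sort($"userId", $"confidence".desc)

// Finally, write the recommendations to Elasticsearch as the last failing
// paragraph does (es-hadoop connector, local ES on port 9200).
val esConfig = Map("pushdown" -> "true", "es.nodes" -> "127.0.0.1", "es.port" -> "9200")
enrichedRecommendationsDF.write.format("org.elasticsearch.spark.sql")
  .mode(SaveMode.Overwrite)
  .options(esConfig)
  .save("advancedspark/personalized-als")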

data_persist/zeppelin/2B68NAUVG/note.json

Lines changed: 1 addition & 1 deletion
@@ -573,7 +573,7 @@
       "progressUpdateIntervalMs": 500
     }
   ],
-  "name": "TODO: Live Recs/04: Generate Item-to-Item Tag-Similarity Graph Recs",
+  "name": "TODO: Live Recs/04: Generate Item-to-Item Tag-Similarity Graph-based Recs",
   "id": "2B68NAUVG",
   "angularObjects": {
     "2ARR8UZDJ": [],

data_persist/zeppelin/2B7RXDS6A/note.json

Lines changed: 36 additions & 36 deletions
Large diffs are not rendered by default.

data_persist/zeppelin/2B8DEJYQS/note.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@
6262
{
6363
"title": "Tokenize Song Lyrics",
6464
"text": "import org.apache.spark.ml.feature.RegexTokenizer\n\nval tokenizer \u003d new RegexTokenizer()\n .setInputCol(\"lyrics\")\n .setOutputCol(\"words\")\n .setGaps(false)\n .setPattern(\"\\\\p{L}+\")",
65-
"dateUpdated": "Feb 4, 2016 6:37:29 AM",
65+
"dateUpdated": "Feb 5, 2016 9:48:53 PM",
6666
"config": {
6767
"colWidth": 12.0,
6868
"editorMode": "ace/mode/scala",

0 commit comments