
str1 = 'Hello Google'

# o/p = 'elgooG olleH'

# Approach 1: slice with a negative step
reversed_str = str1[::-1]
print(reversed_str)

# Approach 2: build the reversed string character by character
newstr = ''
for i in range(len(str1)):
    newstr = str1[i] + newstr
print(newstr)
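
Another common answer to the same question uses the built-in reversed() iterator (an alternative added here for reference, not part of the original notes):

# Approach 3: reversed() yields the characters in reverse order
print(''.join(reversed(str1)))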

from pyspark.sql import SparkSession

# Create (or reuse) a SparkSession
spark = SparkSession.builder.appName("name").getOrCreate()

# Read a CSV file into a DataFrame
df = spark.read.csv("path")

# Read a plain text file as an RDD through the SparkContext
rdd = spark.sparkContext.textFile("testfile")
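
In practice the CSV read usually also sets header and schema-inference options; a minimal sketch using the same placeholder path:

# Treat the first row as a header and infer column types
df = spark.read.csv("path", header=True, inferSchema=True)
df.printSchema()  # inspect the inferred schema
df.show(5)        # preview the first few rows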

list2 = [(25, 3), (6, 9), (11, 5), (124, 2)]

# o/p = '1,2,3,4,5,6,9'  (unique digits across all the numbers, sorted)

s = set()
for pair in list2:
    for num in pair:
        # peel off the digits of each number with % 10 and collect them in a set
        while num > 0:
            s.add(num % 10)
            num //= 10
print(','.join(str(d) for d in sorted(s)))
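
The same result can be produced in a single expression by treating each number as a string (an alternative not in the original notes):

# every character of str(num) is one digit; the set comprehension removes duplicates
digits = sorted({ch for pair in list2 for num in pair for ch in str(num)})
print(','.join(digits))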

emp_id emp_name manager_id


111 Rick null
222 John 111
333 Shane 111
444 Peter 222
555 Jack 222

o/p - PySpark/SQL
emp_name manager_name
John Rick
Shane Rick
Peter John
Jack John

-- self-join the table to map each employee to their manager
-- (the table name emp is assumed; only the columns were given)
select e.emp_name, m.emp_name as manager_name
from emp e
join emp m on e.manager_id = m.emp_id;
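
A PySpark sketch of the same self-join (emp_df is an assumed DataFrame holding the table above, and spark is an active SparkSession):

from pyspark.sql import functions as F

# alias the same DataFrame twice and join employees to their managers
e = emp_df.alias("e")
m = emp_df.alias("m")
result = (e.join(m, F.col("e.manager_id") == F.col("m.emp_id"))
           .select(F.col("e.emp_name"), F.col("m.emp_name").alias("manager_name")))
result.show()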


cust_id trans_id trans_amt trans_date
101 101 15000 20220101
101 102 25000 20220105
201 103 20000 20220112
201 104 50000 20220225
201 105 15000 20220303
301 106 75000 20220318

o/p
year month highest_amt cust_id count_transactions
2022 1 25000 101 2
2022 2 50000 201 1
2022 3 75000 301 1

-- trans_date is stored as a yyyyMMdd string; the table name "table" is kept from
-- the original attempt
with monthly as (
    select
        cust_id,
        trans_amt,
        substr(trans_date, 1, 4)                             as year,
        extract(month from to_date(trans_date, 'yyyyMMdd'))  as month
    from table
),
ranked as (
    select
        year, month, cust_id, trans_amt,
        row_number() over (partition by year, month order by trans_amt desc) as rn,
        count(*)     over (partition by year, month, cust_id)                as count_transactions
    from monthly
)
select year, month, trans_amt as highest_amt, cust_id, count_transactions
from ranked
where rn = 1;
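
A PySpark DataFrame version of the same logic, sketched under the assumption that the input DataFrame is called trans_df and has the columns shown above:

from pyspark.sql import functions as F
from pyspark.sql.window import Window

# derive year and month from the yyyyMMdd string
monthly = (trans_df
           .withColumn("dt", F.to_date("trans_date", "yyyyMMdd"))
           .withColumn("year", F.year("dt"))
           .withColumn("month", F.month("dt")))

# rank amounts within each month and count transactions per customer per month
w_rank  = Window.partitionBy("year", "month").orderBy(F.col("trans_amt").desc())
w_count = Window.partitionBy("year", "month", "cust_id")

result = (monthly
          .withColumn("rn", F.row_number().over(w_rank))
          .withColumn("count_transactions", F.count("*").over(w_count))
          .filter("rn = 1")
          .select("year", "month",
                  F.col("trans_amt").alias("highest_amt"),
                  "cust_id", "count_transactions"))
result.show()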
