181 lines
4.9 KiB
Python
181 lines
4.9 KiB
Python
|
#!/usr/bin/python3
|
||
|
from urllib.parse import urlparse, parse_qs, unquote
|
||
|
import sys
|
||
|
import json
|
||
|
import tempfile
|
||
|
import subprocess
|
||
|
import os
|
||
|
|
||
|
import ox
|
||
|
import ox.web.auth
|
||
|
import pandora_client
|
||
|
import requests
|
||
|
|
||
|
# Stream downloads in 8 KiB chunks.
CHUNK_SIZE = 1024 * 8

# pan.do/ra instance that downloaded files are uploaded to.
base_url = 'https://cdosea.0x2620.org'

# Lazily-initialized pandora_client.API client; populated by get_api().
api = None
|
||
|
|
||
|
def get_api():
    """Sign in to the pan.do/ra API once, caching the client in module-level `api`.

    Exits the process with status 1 if the sign-in request is rejected.
    """
    global api
    if api:
        # Already signed in -- nothing to do.
        return
    api = pandora_client.API(base_url + '/api/')
    response = api.signin(**ox.web.auth.get('cdosea'))
    if response['status']['code'] != 200:
        print('failed to signin')
        print(response)
        sys.exit(1)
|
||
|
|
||
|
def parse_url(url):
    """Extract (file_id, recipient_id, security_hash) from a wetransfer download URL.

    Supports both URL shapes:
      https://www.wetransfer.com/downloads/XXXXXXXXXX/YYYYYYYYY/ZZZZZZZZ   (with recipient)
      https://www.wetransfer.com/downloads/XXXXXXXXXX/ZZZZZZZZ             (no recipient)

    recipient_id is '' when the URL carries no recipient component.
    Trailing slashes are tolerated (they previously broke the tuple unpack).

    Raises:
        ValueError: if the URL is not a recognizable wetransfer download link.
    """
    parts = url.split('downloads/')
    if len(parts) < 2:
        raise ValueError('not a wetransfer download url: %s' % url)
    # Drop empty components caused by trailing or doubled slashes.
    params = [p for p in parts[1].split('/') if p]
    if len(params) == 3:
        file_id, recipient_id, security_hash = params
    elif len(params) == 2:
        recipient_id = ''
        file_id, security_hash = params
    else:
        raise ValueError('unexpected wetransfer url format: %s' % url)
    return file_id, recipient_id, security_hash
|
||
|
|
||
|
def download(url):
    """Download a wetransfer transfer to the current directory.

    Resolves the short/redirect URL, asks the wetransfer API for download
    metadata, streams the payload to a local file named after the transfer,
    verifies the size against Content-Length, and returns the local filename.
    Exits the process with status 1 if the downloaded size does not match.
    """
    # resolve redirect
    url = requests.get(url).url
    file_id, recipient_id, security_hash = parse_url(url)

    url_t = 'https://wetransfer.com/api/ui/transfers/{file_id}/{security_hash}/download?recipient_id={recipient_id}'

    url = url_t.format(
        file_id=file_id,
        recipient_id=recipient_id,
        security_hash=security_hash
    )
    r = requests.get(url)
    download_data = r.json()

    print('Downloading {0}...'.format(url))
    offset = 0
    if 'direct_link' in download_data:
        # Direct link: filename is the last (percent-encoded) path component.
        direct_link_path = urlparse(download_data['direct_link']).path
        direct_link_path = direct_link_path.split('/')
        filename = unquote(direct_link_path[-1])
        # FIXME: does wetransfer support Range requests?
        # NOTE(review): the resume logic below is intentionally disabled;
        # `offset` therefore always stays 0 and the file is rewritten from scratch.
        '''
        if os.path.exists(filename):
            offset = os.path.getsize(filename)
            resume_header = {'Range': 'bytes=%d-' % offset}
            r = requests.get(download_data['direct_link'], headers=resume_header, stream=True)
        else:
        '''
        r = requests.get(download_data['direct_link'], stream=True)
    else:
        # Form-based download: POST the provided fields to the form action URL.
        filename = unquote(download_data['fields']['filename'])
        r = requests.post(
            download_data['formdata']['action'],
            data=download_data['fields'],
            stream=True
        )

    # NOTE(review): raises KeyError if the response is chunked and carries
    # no Content-Length header -- confirm wetransfer always sends it.
    file_size = int(r.headers['Content-Length'])
    if offset:
        outputfile = open(filename, 'ab')
    else:
        outputfile = open(filename, 'wb')
    counter = 0
    for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
        if chunk:
            outputfile.write(chunk)
            outputfile.flush()
            # NOTE(review): progress is reported before counter is incremented,
            # so the displayed percentage lags the written bytes by one chunk.
            done = counter * CHUNK_SIZE + offset
            sys.stdout.write(
                '\r%0.2f%% %s/%s' % (
                    done * 100/file_size,
                    done,
                    file_size
                )
            )
            counter += 1
    outputfile.close()
    # Size check doubles as a download-integrity check.
    if os.path.getsize(filename) != file_size:
        print('\nFailed to download')
        sys.exit(1)
    sys.stdout.write('\r100% {0}/{1}\n'.format(file_size, file_size))
    print('Finished! {0}'.format(filename))
    return filename
|
||
|
|
||
|
|
||
|
def upload(filename):
    """Extract an archive and upload every contained file to pan.do/ra.

    Extracts `filename` (if it exists locally) into the current directory,
    walks the tree uploading each file via upload_file(), then creates a
    pan.do/ra list named after the archive containing all resulting items.

    Note: the walk starts at '.', so any pre-existing files in the working
    directory are uploaded as well.
    """
    if os.path.exists(filename):
        # files > 4GB fail with unzip; jar handles zip64 archives
        #subprocess.call(['unzip', filename])
        subprocess.call(['jar', 'xf', filename])

    oshashes = []

    for root, folders, files in os.walk('.'):
        for name in ox.sorted_strings(files):
            # Skip the archive itself and macOS resource-fork files.
            if name.endswith('.zip') or name.startswith('._'):
                continue
            path = os.path.join(root, name)
            if '__MACOSX' in path:
                continue
            oshashes.append(upload_file(path))

    get_api()
    # Map uploaded media files to their pan.do/ra items (several files can
    # belong to one item, hence the set).
    items = set()
    for oshash in oshashes:
        media = api.getMediaInfo(id=oshash)['data']
        items.add(media['item'])

    list_id = api.addList(name=os.path.basename(filename))['data']['id']
    api.addListItems(list=list_id, items=list(items))
|
||
|
|
||
|
def upload_file(f):
    """Register file `f` with pan.do/ra and upload its contents.

    Returns the file's oshash. The upload is skipped when the backend
    already has the file (either attached to an item or previously uploaded).
    """
    get_api()

    basename = os.path.basename(f)

    # register file with pan.do/ra
    info = ox.avinfo(f)
    oshash = info['oshash']
    known = api.getMediaInfo(id=oshash)['data']
    if 'item' in known:
        # Already attached to an item -- nothing left to do.
        return oshash
    # Never send local filesystem paths to the server.
    info.pop('path', None)
    response = api.addMedia({
        'id': oshash,
        'filename': basename,
        'info': info
    })

    # dont upload again if file is known
    if response['status']['text'] == 'file exists':
        return oshash

    # upload media file; a per-user/per-file state file makes it resumable
    api._resume_file = os.path.join(
        tempfile.gettempdir(),
        'pandora_client.%s.%s.json' % (os.environ.get('USER'), oshash)
    )
    api.upload_chunks('%s/api/upload/direct/' % base_url, f, {'id': oshash})
    return oshash
|
||
|
|
||
|
|
||
|
def main(url):
    """Download `url` if it is an http(s) link, else treat it as a local path; then upload."""
    filename = download(url) if url.startswith('http') else url
    upload(filename)
|
||
|
|
||
|
|
||
|
if __name__ == '__main__':
    # Expect exactly one argument: a wetransfer URL or a local archive path.
    arguments = sys.argv[1:]
    if len(arguments) == 1:
        main(arguments[0])
    else:
        print('%s url' % sys.argv[0])
        sys.exit(1)
|