refactor
This commit is contained in:
parent 1ad3740677
commit 6ef42c9fee
1 changed file with 68 additions and 64 deletions
re.py: 132 changed lines
@@ -46,74 +46,78 @@ class Pandora:
            'keys': keys
        })['data']

def get_playlists():
    # Get all storylines with tags
    storylines = [{
        'name': entity['name'],
        'tags': entity['tags']
    } for entity in pandora.find_entities({
        'conditions': [
            {'key': 'type', 'operator': '==', 'value': 'storylines'},
        ],
        'operator': '&'
    }, ['id', 'name', 'tags']) if entity.get('tags', [])]
    # Get list of storyline names
    names = list(set([storyline['name'] for storyline in storylines]))
    # Get all clips annotated with storyline references
    clips = [clip for clip in pandora.find_annotations({
        'conditions': [
            {'key': 'layer', 'operator': '==', 'value': 'storylines'}
        ],
        'operator': '&'
    }, ['id', 'in', 'out', 'value']) if clip['value'] in names]
    # Get list of ids for videos with clips
    ids = list(set([clip['id'].split('/')[0] for clip in clips]))
    # Get (and cache) order (and code + name) for each video
    filename = 'json/videos.json'
    if os.path.exists(filename):
        with open(filename) as f:
            videos_ = json.loads(f.read())
        ids_ = [video['id'] for video in videos_]
    else:
        videos_, ids_ = [], []
    videos = sorted(videos_ + [
        pandora.get(id, ['code', 'id', 'order', 'title'])
        for id in ids if not id in ids_
    ], key=lambda video: video['order'])
    with open(filename, 'w') as f:
        f.write(json.dumps(videos, indent=4, sort_keys=True))
    order = {video['id']: video['order'] for video in videos}
    print(order)
    # Sort clips
    clips = sorted(
        clips,
        key=lambda clip: order[clip['id'].split('/')[0]] * 1000000 + clip['in']
    )
    # Return playlists
    return [playlist for playlist in [{
        'name': storyline['name'],
        'tags': storyline['tags'],
        'clips': [
            '{}_{:.3f}-{:.3f}'.format(
                clip['id'].split('/')[0], clip['in'], clip['out']
            ) for clip in clips if clip['value'] == storyline['name']
        ]
    } for storyline in storylines] if playlist['clips']]
class Engine:

    def get_videos(user):
        products = []
        for event in user['events']:
            if 'product' in event['data']:
                products.append(event['data']['product'])
    def __init__(self):
        pass

    def shift_clips(clips):
        index = random.randrange(len(clips))
        return clips[index:] + clips[:index - 1]
    def _shift_clips(clips):
        index = random.randrange(len(clips))
        return clips[index:] + clips[:index - 1]

    def get_videos(user):
        products = []
        for event in user['events']:
            if 'product' in event['data']:
                products.append(event['data']['product'])

    def update(self):
        # Get all storylines with tags
        storylines = [{
            'name': entity['name'],
            'tags': entity['tags']
        } for entity in pandora.find_entities({
            'conditions': [
                {'key': 'type', 'operator': '==', 'value': 'storylines'},
            ],
            'operator': '&'
        }, ['id', 'name', 'tags']) if entity.get('tags', [])]
        # Get list of storyline names
        names = list(set([storyline['name'] for storyline in storylines]))
        # Get all clips annotated with storyline references
        clips = [clip for clip in pandora.find_annotations({
            'conditions': [
                {'key': 'layer', 'operator': '==', 'value': 'storylines'}
            ],
            'operator': '&'
        }, ['id', 'in', 'out', 'value']) if clip['value'] in names]
        # Get list of ids for videos with clips
        ids = list(set([clip['id'].split('/')[0] for clip in clips]))
        # Get (and cache) order (and code + name) for each video
        filename = 'json/videos.json'
        if os.path.exists(filename):
            with open(filename) as f:
                videos_ = json.loads(f.read())
            ids_ = [video['id'] for video in videos_]
        else:
            videos_, ids_ = [], []
        videos = sorted(videos_ + [
            pandora.get(id, ['code', 'id', 'order', 'title'])
            for id in ids if not id in ids_
        ], key=lambda video: video['order'])
        with open(filename, 'w') as f:
            f.write(json.dumps(videos, indent=4, sort_keys=True))
        order = {video['id']: video['order'] for video in videos}
        # Sort clips
        clips = sorted(
            clips,
            key=lambda clip: order[clip['id'].split('/')[0]] * 1000000 + clip['in']
        )
        # Return playlists
        return [playlist for playlist in [{
            'name': storyline['name'],
            'tags': storyline['tags'],
            'clips': [
                '{}_{:.3f}-{:.3f}'.format(
                    clip['id'].split('/')[0], clip['in'], clip['out']
                ) for clip in clips if clip['value'] == storyline['name']
            ]
        } for storyline in storylines] if playlist['clips']]

if __name__ == '__main__':
    pandora = Pandora()
    playlists = get_playlists()
    with open('playlists.json', 'w') as f:
    engine = Engine()
    playlists = engine.update()
    with open('json/playlists.json', 'w') as f:
        f.write(json.dumps(playlists, indent=4, sort_keys=True))
    print(len(playlists), 'playlists')
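A minimal sketch, not part of this commit, of how the json/playlists.json file written by the new __main__ block could be read back: each clip string has the '<id>_<in>-<out>' shape produced by the format call above, so it can be split into a video id and two float timestamps. The parse_clip helper and the duration summary below are assumptions for illustration only.

import json

def parse_clip(clip):
    # Example: '0123456_12.000-34.500' -> ('0123456', 12.0, 34.5); assumes the
    # '{}_{:.3f}-{:.3f}' format used by get_playlists() / Engine.update()
    video_id, _, points = clip.rpartition('_')
    in_point, _, out_point = points.partition('-')
    return video_id, float(in_point), float(out_point)

if __name__ == '__main__':
    with open('json/playlists.json') as f:
        playlists = json.load(f)
    for playlist in playlists:
        clips = [parse_clip(clip) for clip in playlist['clips']]
        # Total playlist duration in seconds, from the parsed in/out points
        duration = sum(out - in_point for _, in_point, out in clips)
        print(playlist['name'], len(clips), 'clips,', round(duration, 3), 'seconds')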