flake8 + map->[]

j 2016-06-14 21:06:27 +02:00
parent 93734a4fbc
commit fd9d3bdabf
2 changed files with 114 additions and 100 deletions
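For readers skimming the hunks below: the "map->[]" half of the title refers to replacing Python 2 era map()/filter() calls with list comprehensions, and the flake8 half to mechanical style fixes (comment spacing, splitting one-line statements, identity comparisons, and similar). A minimal sketch of the map->[] pattern, using made-up data rather than code from this diff:

    # map()/filter() with lambdas, as in the old lines below
    cast = [['Actor A', 'Role A'], ['Actor B', 'Role B']]
    old = map(lambda x: {'actor': x[0], 'character': x[1]}, cast)

    # equivalent list comprehension; unlike map() on Python 3,
    # this always produces a real list
    new = [{'actor': x[0], 'character': x[1]} for x in cast]
    assert list(old) == new

    # filter(lambda ...) likewise becomes an if clause or an explicit loop
    keys = [{'id': 'director', 'sortType': 'person'}, {'id': 'year'}]
    person_keys = [k['id'] for k in keys if k.get('sortType') == 'person']
    assert person_keys == ['director']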


@@ -142,10 +142,17 @@ def get_item(info, user=None, async=False):
tasks.update_poster.delay(item.public_id)
return item
def get_path(f, x): return f.path(x)
def get_icon_path(f, x): return get_path(f, 'icon.jpg')
def get_poster_path(f, x): return get_path(f, 'poster.jpg')
def get_torrent_path(f, x): return get_path(f, 'torrent.torrent')
def get_path(f, x):
return f.path(x)
def get_icon_path(f, x):
return get_path(f, 'icon.jpg')
def get_poster_path(f, x):
return get_path(f, 'poster.jpg')
def get_torrent_path(f, x):
return get_path(f, 'torrent.torrent')
class Item(models.Model):
created = models.DateTimeField(auto_now_add=True)
@@ -154,9 +161,9 @@ class Item(models.Model):
user = models.ForeignKey(User, null=True, related_name='items')
groups = models.ManyToManyField(Group, blank=True, related_name='items')
#while metadata is updated, files are set to rendered=False
# while metadata is updated, files are set to rendered=False
rendered = models.BooleanField(default=False, db_index=True)
#should be set based on user
# should be set based on user
level = models.IntegerField(db_index=True)
public_id = models.CharField(max_length=128, unique=True, blank=True)
@@ -175,7 +182,7 @@ class Item(models.Model):
torrent = models.FileField(default=None, blank=True, max_length=1000, upload_to=get_torrent_path)
stream_info = fields.DictField(default={}, editable=False)
#stream related fields
# stream related fields
stream_aspect = models.FloatField(default=4/3)
objects = managers.ItemManager()
@@ -227,9 +234,9 @@ class Item(models.Model):
def edit(self, data):
data = data.copy()
#FIXME: how to map the keys to the right place to write them to?
# FIXME: how to map the keys to the right place to write them to?
if 'id' in data:
#FIXME: check if id is valid and exists and move/merge items accordingly
# FIXME: check if id is valid and exists and move/merge items accordingly
del data['id']
if 'groups' in data:
groups = data.pop('groups')
@@ -354,7 +361,7 @@ class Item(models.Model):
if not settings.USE_IMDB:
self.public_id = ox.toAZ(self.id)
#this does not work if another item without imdbid has the same metadata
# this does not work if another item without imdbid has the same metadata
oxdbId = self.oxdb_id()
if not settings.USE_IMDB:
self.oxdbId = None
@@ -379,10 +386,10 @@ class Item(models.Model):
if len(self.public_id) != 7:
update_ids = True
#id changed, what about existing item with new id?
# id changed, what about existing item with new id?
if settings.USE_IMDB and len(self.public_id) != 7 and self.oxdbId != self.public_id:
self.public_id = self.oxdbId
#FIXME: move files to new id here
# FIXME: move files to new id here
if settings.USE_IMDB and len(self.public_id) == 7:
for key in ('title', 'year', 'director', 'season', 'episode',
'seriesTitle', 'episodeTitle'):
@@ -415,7 +422,8 @@ class Item(models.Model):
self.update_sort()
self.update_facets()
if update_ids:
for c in self.clips.all(): c.save()
for c in self.clips.all():
c.save()
for a in self.annotations.all():
public_id = a.public_id.split('/')[1]
public_id = "%s/%s" % (self.public_id, public_id)
@@ -455,7 +463,7 @@ class Item(models.Model):
self.delete()
if save:
other.save()
#FIXME: update poster, stills and streams after this
# FIXME: update poster, stills and streams after this
def merge_streams(self, output, resolution=None, format="webm"):
streams = [s.get(resolution, format).media.path for s in self.streams()]
@@ -569,7 +577,7 @@ class Item(models.Model):
if not keys or key in keys:
if key not in i:
value = self.get(key)
#also get values from sort table, i.e. numberof values
# also get values from sort table, i.e. numberof values
if not value:
try:
if self.sort and hasattr(self.sort, key):
@@ -582,7 +590,7 @@ class Item(models.Model):
if 'cast' in i and isinstance(i['cast'][0], basestring):
i['cast'] = [i['cast']]
if 'cast' in i and isinstance(i['cast'][0], list):
i['cast'] = map(lambda x: {'actor': x[0], 'character': x[1]}, i['cast'])
i['cast'] = [{'actor': x[0], 'character': x[1]} for x in i['cast']]
if 'connections' in i:
i['connections'] = self.expand_connections()
@@ -617,7 +625,7 @@ class Item(models.Model):
if k in i:
del i[k]
#only needed by admins
# only needed by admins
if keys and 'posters' in keys:
i['posters'] = self.get_posters()
@@ -798,10 +806,9 @@ class Item(models.Model):
values = list(set(values))
elif key == 'name':
values = []
for k in map(lambda x: x['id'],
filter(lambda x: x.get('sortType') == 'person',
settings.CONFIG['itemKeys'])):
values += self.get(k, [])
for k in settings.CONFIG['itemKeys']:
if k.get('sortType') == 'person':
values += self.get(k['id'], [])
values = list(set(values))
else:
values = self.get(key, '')
@@ -812,8 +819,8 @@ class Item(models.Model):
isSeries = self.get('series',
self.get('episodeTitle',
self.get('episode',
self.get('seriesTitle')))) is not None
self.get('episode',
self.get('seriesTitle')))) is not None
save('series', isSeries)
def update_sort(self):
@@ -882,7 +889,7 @@ class Item(models.Model):
'volume',
)
#sort keys based on database, these will always be available
# sort keys based on database, these will always be available
s.public_id = self.public_id.replace('0x', 'xx')
s.oxdbId = self.oxdbId
if not settings.USE_IMDB and s.public_id.isupper() and s.public_id.isalpha():
@@ -898,7 +905,7 @@ class Item(models.Model):
s.numberoffiles = self.files.all().count()
videos = self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True))
if videos.count() > 0:
#s.duration = sum([v.duration for v in videos])
# s.duration = sum([v.duration for v in videos])
s.duration = sum([v.duration for v in self.streams()])
v = videos[0]
if v.is_audio or not v.info.get('video'):
@@ -914,7 +921,7 @@ class Item(models.Model):
s.aspectratio = float(utils.parse_decimal(v.display_aspect_ratio))
s.pixels = sum([v.pixels for v in videos])
s.parts = videos.count()
s.size = sum([v.size for v in videos]) #FIXME: only size of movies?
s.size = sum([v.size for v in videos]) # FIXME: only size of movies?
if s.duration:
s.bitrate = s.size * 8 / s.duration
else:
@@ -980,7 +987,7 @@ class Item(models.Model):
value = value / (s.duration / 60) if value and s.duration else None
set_value(s, name, value)
elif sort_type in ('length', 'integer', 'time', 'float'):
#can be length of strings or length of arrays, i.e. keywords
# can be length of strings or length of arrays, i.e. keywords
if 'layer' in key.get('value', []):
value = self.annotations.filter(layer=key['value']['layer']).count()
else:
@@ -1019,11 +1026,9 @@ class Item(models.Model):
for item in sublist]
elif key == 'name':
current_values = []
#FIXME: is there a better way to build name collection?
for k in map(lambda x: x['id'],
filter(lambda x: x.get('sortType') == 'person',
settings.CONFIG['itemKeys'])):
current_values += self.get(k, [])
for k in settings.CONFIG['itemKeys']:
if k.get('sortType') == 'person':
current_values += self.get(k['id'], [])
if not isinstance(current_values, list):
if not current_values:
current_values = []
@@ -1049,8 +1054,10 @@ class Item(models.Model):
for a in self.annotations.filter(layer=key).distinct().values('value')]
layer = utils.get_by_id(settings.CONFIG['layers'], key)
if layer.get('type') == 'entity':
current_values = [a['name']
for a in Entity.objects.filter(id__in=[ox.fromAZ(i) for i in current_values]).values('name')]
current_values = [
a['name']
for a in Entity.objects.filter(id__in=[ox.fromAZ(i) for i in current_values]).values('name')
]
current_values = [ox.decode_html(ox.strip_tags(v.replace('<br>', ' '))) for v in current_values]
self.update_facet_values(key, current_values)
@@ -1138,8 +1145,8 @@ class Item(models.Model):
return User.objects.filter(
volumes__files__file__item=self
).order_by('date_joined').distinct()
#FIXME: profile not showing up here
#).order_by('-profile__level', 'date_joined').distinct()
# FIXME: profile not showing up here
# ).order_by('-profile__level', 'date_joined').distinct()
def sets(self):
sets = []
@@ -1224,8 +1231,8 @@ class Item(models.Model):
media_path = v.media.path
extension = media_path.split('.')[-1]
url = "%s/torrent/%s.%s" % (self.get_absolute_url(),
quote(filename.encode('utf-8')),
extension)
quote(filename.encode('utf-8')),
extension)
video = "%s.%s" % (base, extension)
if isinstance(media_path, unicode):
media_path = media_path.encode('utf-8')
@@ -1264,10 +1271,10 @@ class Item(models.Model):
if duration:
meta['playtime'] = ox.format_duration(duration*1000)[:-4]
#slightly bigger torrent file but better for streaming
piece_size_pow2 = 15 #1 mbps -> 32KB pieces
# slightly bigger torrent file but better for streaming
piece_size_pow2 = 15 # 1 mbps -> 32KB pieces
if size / duration >= 1000000:
piece_size_pow2 = 16 #2 mbps -> 64KB pieces
piece_size_pow2 = 16 # 2 mbps -> 64KB pieces
meta['piece_size_pow2'] = piece_size_pow2
ox.torrent.create_torrent(video, settings.TRACKER_URL, meta)
@@ -1313,12 +1320,12 @@ class Item(models.Model):
n = streams.count()
for s in streams:
self.data['volume'] += s.volume * s.duration
color = map(lambda a, b: (a+b)/n, color, ox.image.getRGB(s.color or [0.0] * 3))
color = [(a+b)/n for a, b in zip(color, ox.image.getRGB(s.color or [0.0] * 3))]
offset += s.duration
self.data['hue'], self.data['saturation'], self.data['lightness'] = ox.image.getHSL(color)
if offset:
self.data['volume'] /= offset
#extract.timeline_strip(self, self.data['cuts'], stream.info, self.timeline_prefix[:-8])
# extract.timeline_strip(self, self.data['cuts'], stream.info, self.timeline_prefix[:-8])
self.json = self.get_json()
self.update_sort()
self.select_frame()
@@ -1344,7 +1351,8 @@ class Item(models.Model):
if self.json.get('posterRatio') != self.poster_width / self.poster_height:
self.json = self.get_json()
Item.objects.filter(id=self.id).update(json=self.json,
poster_width=self.poster_width, poster_height=self.poster_height)
poster_width=self.poster_width,
poster_height=self.poster_height)
def prefered_poster_url(self):
if settings.DATA_SERVICE:
@@ -1368,7 +1376,7 @@ class Item(models.Model):
durations.append(s.duration)
join_tiles(timelines, durations, self.timeline_prefix)
else:
#remove joined timeline if it was created at some point
# remove joined timeline if it was created at some point
for f in glob(os.path.join(settings.MEDIA_ROOT, self.path(), 'timeline*.jpg')):
os.unlink(f)
@@ -1438,7 +1446,8 @@ class Item(models.Model):
width, height = self.json['resolution']
if width and height:
pos = self.sort.duration / 2
for p in map(int, [pos/2, pos, pos+pos/2]):
for p in [pos/2, pos, pos+pos/2]:
p = int(p)
path = self.frame(p, height)
if path:
frames.append({
@@ -1489,7 +1498,7 @@ class Item(models.Model):
cmd += ['-f', frame]
p = subprocess.Popen(cmd, close_fds=True)
p.wait()
#remove cached versions
# remove cached versions
icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon))
for f in glob(icon.replace('.jpg', '*.jpg')):
if f != icon:
@@ -1503,10 +1512,10 @@ class Item(models.Model):
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if not subtitles:
return
#otherwise add empty 5 seconds annotation every minute
# otherwise add empty 5 seconds annotation every minute
duration = sum([s.duration for s in self.streams()])
layer = subtitles['id']
#FIXME: allow annotations from no user instead?
# FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0]
clips = [(i, i+5) for i in range(0, int(duration) - 5, 60)]
@@ -1556,7 +1565,7 @@ class Item(models.Model):
else:
language = languages[0]
#loop over all videos
# loop over all videos
for f in self.files.filter(Q(is_audio=True) | Q(is_video=True)) \
.filter(selected=True).order_by('sort_path'):
subtitles_added = False
@@ -1564,9 +1573,9 @@ class Item(models.Model):
if f.instances.all().count() > 0:
user = f.instances.all()[0].volume.user
else:
#FIXME: allow annotations from no user instead?
# FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0]
#if there is a subtitle with the same prefix, import
# if there is a subtitle with the same prefix, import
q = subtitles.filter(path__startswith=prefix,
language=language)
if q.count() == 1:
@@ -1586,7 +1595,7 @@ class Item(models.Model):
user=user
)
annotation.save()
#otherwise add empty 5 seconds annotation every minute
# otherwise add empty 5 seconds annotation every minute
if not subtitles_added:
start = offset and int(offset / 60) * 60 + 60 or 0
for i in range(start,
@@ -1602,7 +1611,7 @@ class Item(models.Model):
)
annotation.save()
offset += f.duration
#remove left over clips without annotations
# remove left over clips without annotations
Clip.objects.filter(item=self, annotations__id=None).delete()
return True
else:
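The second file's hunks that follow are mostly the same kind of mechanical flake8 cleanups: identity and membership comparisons, dropping == True, and spacing around % and after #. A rough, hypothetical illustration of those patterns (placeholder names, not code from this diff):

    data = {'range': [0, 10]}
    value = None

    # E711/E712: test None with "is" and booleans by truth value
    if value is None:        # instead of: value == None
        value = 0
    if data.get('ok'):       # instead of: data.get('ok') == True
        pass

    # E713: membership reads as "not in"
    if 'keys' not in data:   # instead of: not 'keys' in data
        data['keys'] = []

    # E225/E228: spaces around operators such as %
    path = 'poster.%d.jpg' % 256   # instead of: 'poster.%d.jpg'%256

    # E262/E265: a space after "#" in comments ("# FIXME: ..." not "#FIXME: ...")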


@@ -80,7 +80,7 @@ def _order_by_group(query):
def parse_query(data, user):
query = {}
query['range'] = [0, 100]
query['sort'] = [{'key':'title', 'operator':'+'}]
query['sort'] = [{'key': 'title', 'operator': '+'}]
for key in ('sort', 'keys', 'group', 'range', 'position', 'positions'):
if key in data:
query[key] = data[key]
@@ -96,7 +96,7 @@ def parse_query(data, user):
if not query['clip_keys']:
query['clip_keys'] = ['id', 'in', 'out', 'annotations']
#group by only allows sorting by name or number of itmes
# group by only allows sorting by name or number of itmes
return query
def find(request, data):
@@ -228,7 +228,7 @@ def find(request, data):
def only_p_sums(m):
r = {}
for p in _p:
if p == 'accessed':
if p == 'accessed':
r[p] = m.sort.accessed or ''
elif p == 'modified':
r[p] = m.sort.modified
@@ -239,6 +239,7 @@ def find(request, data):
if 'clip_qs' in query:
r['clips'] = get_clips(query['clip_qs'].filter(item=m))
return r
def only_p(m):
r = {}
if m:
@@ -250,7 +251,7 @@ def find(request, data):
r['clips'] = get_clips(query['clip_qs'].filter(item__public_id=m['id']))
return r
qs = qs[query['range'][0]:query['range'][1]]
#response['data']['items'] = [m.get_json(_p) for m in qs]
# response['data']['items'] = [m.get_json(_p) for m in qs]
if filter(lambda p: p in (
'accessed', 'modified', 'timesaccessed', 'viewed'
), _p):
@@ -259,7 +260,7 @@ def find(request, data):
else:
response['data']['items'] = [only_p(m['json']) for m in qs.values('json')]
else: # otherwise stats
else: # otherwise stats
items = query['qs']
files = File.objects.filter(item__in=items).filter(selected=True).filter(size__gt=0)
r = files.aggregate(
@@ -267,9 +268,10 @@ def find(request, data):
Sum('pixels'),
Sum('size')
)
totals = [i['id']
totals = [
i['id']
for i in settings.CONFIG['totals']
if not 'capability' in i or has_capability(request.user, i['capability'])
if 'capability' not in i or has_capability(request.user, i['capability'])
]
if 'duration' in totals:
response['data']['duration'] = r['duration__sum']
@@ -284,8 +286,8 @@ def find(request, data):
if 'size' in totals:
response['data']['size'] = r['size__sum']
for key in ('runtime', 'duration', 'pixels', 'size'):
if key in totals and response['data'][key] == None:
response['data'][key] = 0
if key in totals and response['data'][key] is None:
response['data'][key] = 0
return render_to_json_response(response)
actions.register(find)
@@ -305,7 +307,7 @@ def autocomplete(request, data):
}
see: autocompleteEntities
'''
if not 'range' in data:
if 'range' not in data:
data['range'] = [0, 10]
op = data.get('operator', '=')
@@ -313,7 +315,8 @@ def autocomplete(request, data):
order_by = key.get('autocompleteSort', False)
if order_by:
for o in order_by:
if o['operator'] != '-': o['operator'] = ''
if o['operator'] != '-':
o['operator'] = ''
order_by = ['%(operator)ssort__%(key)s' % o for o in order_by]
else:
order_by = ['-items']
@@ -375,8 +378,8 @@ def findId(request, data):
]
if not response['data']['items'] \
and settings.USE_IMDB \
and settings.DATA_SERVICE:
and settings.USE_IMDB \
and settings.DATA_SERVICE:
r = models.external_data('getId', data)
if r['status']['code'] == 200:
response['data']['items'] = [r['data']]
@@ -479,14 +482,14 @@ def get(request, data):
info['groups'] = [g.name for g in item.groups.all()]
for k in settings.CONFIG['itemKeys']:
if 'capability' in k \
and not (item.editable(request.user) or has_capability(request.user, k['capability'])) \
and k['id'] in info \
and k['id'] not in ('parts', 'durations', 'duration'):
del info[k['id']]
and not (item.editable(request.user) or has_capability(request.user, k['capability'])) \
and k['id'] in info \
and k['id'] not in ('parts', 'durations', 'duration'):
del info[k['id']]
info['editable'] = item.editable(request.user)
response['data'] = info
else:
#response = json_response(status=403, text='permission denied')
# response = json_response(status=403, text='permission denied')
response = json_response(status=404, text='not found')
return render_to_json_response(response)
actions.register(get)
@@ -544,14 +547,14 @@ def edit(request, data):
if item.editable(request.user):
response = json_response(status=200, text='ok')
if 'rightslevel' in data:
if request.user.profile.capability('canEditRightsLevel') == True:
if request.user.profile.capability('canEditRightsLevel'):
item.level = int(data['rightslevel'])
else:
response = json_response(status=403, text='permission denied')
del data['rightslevel']
if 'user' in data:
if request.user.profile.get_level() in ('admin', 'staff') and \
models.User.objects.filter(username=data['user']).exists():
models.User.objects.filter(username=data['user']).exists():
new_user = models.User.objects.get(username=data['user'])
if new_user != item.user:
item.user = new_user
@@ -590,12 +593,12 @@ def remove(request, data):
response = json_response({})
item = get_object_or_404_json(models.Item, public_id=data['id'])
user = request.user
if user.profile.capability('canRemoveItems') == True or \
user.is_staff or \
item.user == user or \
item.groups.filter(id__in=user.groups.all()).count() > 0:
if user.profile.capability('canRemoveItems') or \
user.is_staff or \
item.user == user or \
item.groups.filter(id__in=user.groups.all()).count() > 0:
add_changelog(request, data)
#FIXME: is this cascading enough or do we end up with orphan files etc.
# FIXME: is this cascading enough or do we end up with orphan files etc.
item.delete()
response = json_response(status=200, text='removed')
else:
@@ -715,7 +718,7 @@ def lookup(request, data):
r = {'id': i.public_id}
for key in ('title', 'director', 'year'):
value = i.get(key)
if value != None:
if value is not None:
r[key] = value
response = json_response(r)
else:
@@ -749,7 +752,7 @@ def frame(request, id, size, position=None):
if not frame:
frame = os.path.join(settings.STATIC_ROOT, 'jpg/list256.jpg')
#raise Http404
# raise Http404
response = HttpFileResponse(frame, content_type='image/jpeg')
if request.method == 'OPTIONS':
response.allow_access()
@@ -770,7 +773,7 @@ def poster_frame(request, id, position):
def image_to_response(image, size=None):
if size:
size = int(size)
path = image.path.replace('.jpg', '.%d.jpg'%size)
path = image.path.replace('.jpg', '.%d.jpg' % size)
if not os.path.exists(path):
image_size = max(image.width, image.height)
if size > image_size:
@@ -792,7 +795,7 @@ def siteposter(request, id, size=None):
image = Image.open(poster)
image_size = max(image.size)
if size < image_size:
path = poster.replace('.jpg', '.%d.jpg'%size)
path = poster.replace('.jpg', '.%d.jpg' % size)
extract.resize_image(poster, path, size=size)
poster = path
return HttpFileResponse(poster, content_type='image/jpeg')
@@ -847,6 +850,7 @@ def timeline(request, id, size, position=-1, format='jpg', mode=None):
modes.pop(modes.index(mode))
prefix = os.path.join(item.timeline_prefix, 'timeline')
def timeline():
timeline = '%s%s%sp' % (prefix, mode, size)
if position > -1:
@@ -881,7 +885,7 @@ def download(request, id, resolution=None, format='webm'):
r = item.merge_streams(video.name, resolution, format)
if not r:
return HttpResponseForbidden()
elif r == True:
elif r is True:
response = HttpResponse(FileWrapper(video), content_type=content_type)
response['Content-Length'] = os.path.getsize(video.name)
else:
@@ -931,8 +935,8 @@ def video(request, id, resolution, format, index=None, track=None):
raise Http404
path = stream.media.path
#server side cutting
#FIXME: this needs to join segments if needed
# server side cutting
# FIXME: this needs to join segments if needed
t = request.GET.get('t')
if t:
def parse_timestamp(s):
@@ -942,7 +946,7 @@ def video(request, id, resolution, format, index=None, track=None):
t = map(parse_timestamp, t.split(','))
ext = '.%s' % format
content_type = mimetypes.guess_type(path)[0]
if len(t) == 2 and t[1] > t[0] and stream.info['duration']>=t[1]:
if len(t) == 2 and t[1] > t[0] and stream.info['duration'] >= t[1]:
response = HttpResponse(extract.chop(path, t[0], t[1]), content_type=content_type)
filename = u"Clip of %s - %s-%s - %s %s%s" % (
item.get('title'),
@@ -1000,7 +1004,7 @@ def random_annotation(request):
n = item.annotations.all().count()
pos = random.randint(0, n)
clip = item.annotations.all()[pos]
return redirect('/%s'% clip.public_id)
return redirect('/%s' % clip.public_id)
def atom_xml(request):
add_updated = True
@@ -1058,8 +1062,8 @@ def atom_xml(request):
name.text = item.user.username
for topic in item.get('topics', []):
el = ET.SubElement(entry, "category")
el.attrib['term'] = topic
el = ET.SubElement(entry, "category")
el.attrib['term'] = topic
'''
el = ET.SubElement(entry, "rights")
@@ -1098,7 +1102,7 @@ def atom_xml(request):
value = stream.info['audio'][0].get({
'audio_codec': 'codec'
}.get(key, key))
if value and value != -1:
if value and value != -1:
el = ET.SubElement(format, key)
el.text = unicode(value)
el = ET.SubElement(format, 'pixel_aspect_ratio')
@@ -1111,14 +1115,14 @@ def atom_xml(request):
el.attrib['type'] = 'application/x-bittorrent'
el.attrib['href'] = '%s/torrent/' % page_link
el.attrib['length'] = '%s' % ox.get_torrent_size(item.torrent.path)
#FIXME: loop over streams
#for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])):
# FIXME: loop over streams
# for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])):
for s in item.streams().filter(source=None):
el = ET.SubElement(entry, "link")
el.attrib['rel'] = 'enclosure'
el.attrib['type'] = 'video/%s' % s.format
el.attrib['href'] = '%s/%sp.%s' % (page_link, s.resolution, s.format)
el.attrib['length'] = '%s'%s.media.size
el.attrib['length'] = '%s' % s.media.size
el = ET.SubElement(entry, "media:thumbnail")
thumbheight = 96
@@ -1147,7 +1151,7 @@ def oembed(request):
embed_url = request.build_absolute_uri('/%s' % public_id)
if url.startswith(embed_url):
embed_url = url
if not '#embed' in embed_url:
if '#embed' not in embed_url:
embed_url = '%s#embed' % embed_url
oembed = {}
@@ -1156,8 +1160,8 @@ def oembed(request):
oembed['provider_name'] = settings.SITENAME
oembed['provider_url'] = request.build_absolute_uri('/')
oembed['title'] = item.get('title')
#oembed['author_name'] = item.get('director')
#oembed['author_url'] = ??
# oembed['author_name'] = item.get('director')
# oembed['author_url'] = ??
height = max(settings.CONFIG['video']['resolutions'])
height = min(height, maxheight)
width = int(round(height * item.stream_aspect))
@@ -1220,7 +1224,8 @@ def item_xml(request, id):
j = item.get_json()
j['layers'] = item.get_layers(request.user)
if 'resolution' in j:
j['resolution'] = {'width': j['resolution'][0], 'height':j['resolution'][1]}
j['resolution'] = {'width': j['resolution'][0], 'height': j['resolution'][1]}
def xmltree(root, key, data):
if isinstance(data, list) or \
isinstance(data, tuple):
@@ -1296,12 +1301,12 @@ def item(request, id):
value = value = u', '.join([unicode(v) for v in value])
elif key and key.get('type') == 'float':
value = '%0.3f' % value
elif key and key.get('type') == 'time':
elif key and key.get('type') == 'time':
value = ox.format_duration(value * 1000)
data.append({'key': k, 'title': title, 'value': value})
clips = []
clip = {'in': 0, 'annotations': []}
#logged in users should have javascript. not adding annotations makes load faster
# logged in users should have javascript. not adding annotations makes load faster
if not settings.USE_IMDB and request.user.is_anonymous():
for a in item.annotations.exclude(
layer='subtitles'