Hey Quantopian peeps!
Been playing around and having some fun with all of this on the open source side after the trading link was removed. With that, I'm unable to use Quantopian's portfolio optimizer, so I created my "poor man's" version. I'm posting here for comments and concerns, and as a small poke at Quantopian to help prioritize open sourcing (at least partially) their solution!
The really poor man's version (but much faster)
import numpy as np
## CustomRangeReduce
# inputs:
#   dic: dictionary mapping keys to alphas
#   max_perc: the max allowed weight for any one key
#   full: the gross exposure the cap is scaled against
#
# reduce()
#   returns the dict reweighted so the absolute values sum to 1.0, with no key over the cap
## Examples
# >>> CustomRangeReduce({ 'a': 0.03, 'x': 0.58, 'y': 0.07, 'd': 0.2, 'e': 0.8, 'l': 0.01, 'u': -0.345 }, max_perc=0.28).reduce()
# {'a': 0.020152671755725181, 'e': 0.28000000000000003, 'd': 0.13435114503816792, 'l': 0.0067175572519083977, 'u': -0.23175572519083962, 'y': 0.047022900763358785, 'x': 0.28000000000000003}
# >>> CustomRangeReduce({ 'a': 0.4, 'b': 0.1 }, max_perc=0.5).reduce()
# {'a': 0.5, 'b': 0.49999999999999994}
class CustomRangeReduce:
    def __init__(self, dic, max_perc=0.1, full=1.2, recursive_count=0):
        self.dic = dic
        # the cap is scaled by the gross target; the output weights themselves
        # are normalized so their absolute values sum to 1.0
        self.max_perc = max_perc * full
        self.full = 1.0
        # track recursion depth
        self.recursive_count = recursive_count

    def calc_total(self, ndic=None):
        if ndic is None:
            ndic = self.dic
        # gross exposure: sum of absolute weights
        total_raw = 0
        for k, v in ndic.items():
            total_raw += abs(v)
        return total_raw

    def reduce(self):
        margin_of_error = 0.00001
        total = self.calc_total(ndic=self.dic)
        for k, v in self.dic.items():
            self.dic[k] = np.sign(v) * min(self.max_perc, abs(v) / total)
        if self.recursive_count >= 250:
            # loosen the tolerance after N runs so the recursion always terminates
            margin_of_error = 0.02
            print(">=250: {}".format(self.dic))
        # stop recursing once gross exposure is within the margin of error
        allowed_bottom = self.full - margin_of_error
        allowed_top = self.full + margin_of_error
        value = self.calc_total(ndic=self.dic)
        if value <= allowed_bottom or value >= allowed_top:
            self.dic = CustomRangeReduce(
                self.dic,
                max_perc=self.max_perc,
                full=self.full,
                recursive_count=self.recursive_count + 1
            ).reduce()
        return self.dic
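For what it's worth, here's roughly how I'd wire this into a zipline-style rebalance. This is just a sketch on my end: context.alphas (an {asset: alpha} dict built elsewhere, e.g. from a pipeline factor) and the 5% cap are placeholders, not part of the class.

from zipline.api import order_target_percent

def rebalance(context, data):
    # context.alphas is assumed to be populated elsewhere. Pass a copy,
    # since reduce() mutates the dict it's given.
    weights = CustomRangeReduce(dict(context.alphas), max_perc=0.05).reduce()
    for asset, weight in weights.items():
        order_target_percent(asset, weight)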
This takes MUCH longer than the version above but is more easily extended with additional constraints.
from scipy.optimize import linprog
import numpy as np
class PositionSizeOptimizer:
    def __init__(self,
                 alphas,
                 max_position_size=None,
                 ls_ratio=0.5,
                 portfolio_size=1):
        # linprog only minimizes, so negate the alphas to maximize
        self.alphas = -1 * np.array(alphas)
        self.portfolio_size = portfolio_size
        self.ls_ratio = ls_ratio
        self.max_position_size = self._max_position_size(max_position_size)
        self.boundaries = self._boundaries()
        self.A = [
            np.array(self._long_constraint()),
            np.array(self._short_constraint()),
            self._leverage_constraint()
        ]
        self.b_eq = [
            self.ls_ratio * self.portfolio_size,
            (self.ls_ratio - 1) * self.portfolio_size,
            self.portfolio_size
        ]

    def solve(self):
        return linprog(
            self.alphas,
            A_eq=self.A,
            b_eq=self.b_eq,
            bounds=self.boundaries
        ).x

    def _max_position_size(self, max_position_size):
        if max_position_size is None:
            # default: allow up to 2x a uniform allocation
            return (2.0 / len(self.alphas)) * self.portfolio_size
        return max_position_size

    def _leverage_constraint(self):
        # sums |x_i|: longs enter with +1, shorts with -1
        return [-1 * np.sign(alpha) for alpha in self.alphas]

    # alphas are inverted, so longs are the non-positive entries
    def _long_constraint(self):
        return [1 if alpha <= 0 else 0 for alpha in self.alphas]

    # alphas are inverted, so shorts are the positive entries
    def _short_constraint(self):
        return [1 if alpha > 0 else 0 for alpha in self.alphas]

    def _boundaries(self):
        boundaries = []
        for alpha in self.alphas:
            if np.sign(alpha) == -1:
                # original alpha was positive: long only, capped
                boundaries.append((0, self.max_position_size))
            else:
                # original alpha was non-positive: short only, capped
                boundaries.append((-1 * self.max_position_size, 0))
        return boundaries
def test(alphas, output, ls_ratio, portfolio_size):
    po = PositionSizeOptimizer(alphas, ls_ratio=ls_ratio, portfolio_size=portfolio_size)
    # print('boundaries: {}'.format(po.boundaries))
    # print('alphas: {}'.format(po.alphas))
    x = po.solve()
    if np.allclose(x, output):
        print('Success {} == {}'.format(x, output))
    else:
        print('Fail {} != {}'.format(x, output))

# test([0.1, 0.2, -0.3, 0.3, -0.4], np.array([0.2, 0.4, 0, 0.4, 0.0]), 1, 1)
# test([0.1, 0.2, -0.3, 0.3, -0.4], np.array([0.4, 0.8, 0, 0.8, 0.0]), 1, 2)
# test([-0.2, -0.3, 0.3, -0.4, -0.3], [0, -0.2, 0, -0.4, -0.4], 0, 1)
# test([-0.2, -0.3, 0.3, 0.4, -0.3], [0, -0.1, 0.1, 0.4, -0.4], 0.5, 1)
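Written out, the LP the class builds is: maximize sum(alpha_i * x_i) subject to the longs summing to ls_ratio * portfolio_size, the shorts summing to (ls_ratio - 1) * portfolio_size, gross leverage equal to portfolio_size, and each position bounded by max_position_size. A quick sanity check on a made-up three-name, dollar-neutral book (the alphas here are arbitrary illustration values):

# Dollar neutral (ls_ratio=0.5): longs must sum to 0.5, shorts to -0.5,
# and the solver should load up the strongest alpha on each side.
po = PositionSizeOptimizer([0.1, 0.2, -0.3], ls_ratio=0.5, portfolio_size=1)
print(po.solve())  # roughly [0.0, 0.5, -0.5]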
All of this said, these are both linear solvers, whereas the Quantopian optimizer is more likely convex (or something along those lines) and much more robust. The difference between a weekend hacker and hedge fund resources, I guess.
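For the curious, a convex version of the same idea is only a few lines with cvxpy. This is my guess at the kind of machinery they use, not their actual API, and all names and values below are mine:

import cvxpy as cp
import numpy as np

alphas = np.array([0.1, 0.2, -0.3, 0.3, -0.4])
x = cp.Variable(len(alphas))
constraints = [
    cp.norm(x, 1) <= 1.0,   # gross leverage
    cp.abs(x) <= 0.4,       # per-position cap
    cp.sum(x) == 0.0,       # dollar neutral
]
# maximize alpha-weighted exposure subject to the constraints above
prob = cp.Problem(cp.Maximize(alphas @ x), constraints)
prob.solve()
print(x.value)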
Take it or leave it.
Thanks,
Nick