#Normalize returns--Nat Log -- Technically, the line below uses the simple pct_change() returns under the name log_ret; for daily data the difference between the two is immaterial.
#log_ret = np.log(ret/ret.shift(1)) #Calc for true log returns, if being official is important (use this instead of pct_change())
log_ret = pct
ret = log_ret
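#A quick sanity check of the "immaterial" claim (a minimal sketch, assuming pct is a
#DataFrame of daily simple returns from pct_change()): log(1 + r) is nearly identical
#to r when r is a small daily return, so the two series barely differ.
log_check = np.log(1 + pct)                     #True log returns derived from the simple returns
print((log_check - pct).abs().max().max())      #Largest gap across all securities -- tiny for typical daily moves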
#Create Temporary (Random) weights
weights = np.array(np.random.random(len(ret.columns))) #Automatically matches the number of securities
#Rebalance w/ the constraint that the weights must sum to 1
weights = weights/np.sum(weights)
print(weights)
#DEFINE ARRAYS FOR STORING METRICS
num_runs = 1000 #Kick this number up to 10k-100k for better results
all_weights = np.zeros((num_runs,len(ret.columns)))
ret_arr = np.zeros(num_runs)
vol_arr = np.zeros(num_runs)
sharpe_arr = np.zeros(num_runs)
#Begin MC Loop
for run in range(num_runs):
    #Weights -- this random draw is the key to the Monte Carlo method
    weights = np.array(np.random.random(len(ret.columns))) #Automatically matches the number of securities
    #If you're looking for a challenge, try a Poisson, Gamma, or Student's t dist (see the Gamma sketch below)!
    weights = weights/np.sum(weights)
    #Save weights (for reference later)
    all_weights[run,:] = weights
    #Expected return (record each run's return in ret_arr; annualized with 252 trading days)
    exp_ret = np.sum(log_ret.mean() * weights) * 252
    ret_arr[run] = exp_ret
    #Expected vol: (let's attempt some linear algebra w/out a runtime error!)
    #Sqrt of the dot product of transposed weights X annualized covariance of returns X weights--whew.
    exp_vol = np.sqrt(np.dot(weights.T, np.dot(log_ret.cov()*252, weights)))
    vol_arr[run] = exp_vol
    #Sharpe (risk-free rate assumed to be zero)
    sharpe_arr[run] = ret_arr[run]/vol_arr[run]
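#Once the loop finishes, the stored arrays are what make the simulation useful (a quick
#usage sketch -- only to show what the arrays are for): the argmax of the Sharpe array
#is the index of the winning run, and all_weights holds that run's allocation.
best = sharpe_arr.argmax()
print('Max Sharpe:', sharpe_arr[best])
print('Return / Vol:', ret_arr[best], vol_arr[best])
print('Weights:', all_weights[best,:])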
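#A minimal sketch of the distribution challenge mentioned inside the loop, using a Gamma
#draw instead of a uniform one (the shape/scale values here are illustrative assumptions,
#not part of the original walkthrough). Normalizing independent Gamma draws with a common
#scale is equivalent to sampling the weights from a Dirichlet distribution.
gamma_weights = np.random.gamma(2.0, 1.0, size=len(ret.columns)) #Shape 2.0, scale 1.0 -- both illustrative
gamma_weights = gamma_weights/np.sum(gamma_weights) #Still rebalances to sum to 1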