init

commit eb3dc17ee6

.gitignore (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
Loss/
runs/
screening_result/
models/
__pycache__/
20201203-210319.npy (binary, new file, not shown)
Cprofile_model_01.ps (binary, new file, not shown)
Readme.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Important Notes

## Stock Screener (RSI or COPPOCK)

## Implement Old Filtering
Readme.pdf (binary, new file, not shown)
Samples_Cross_Check.tar (binary, new file, not shown)
ScreenOutput.xlsx (new file, empty)
automatonv1.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import threading
import time
import subprocess

file_symbol=open("symbols.txt","r")
symbols=file_symbol.read().splitlines()
#print(symbols)
symbols = [x.strip(' ') for x in symbols]

#symbols=symbols[:-5]
class myThread (threading.Thread):
    def __init__(self, threadID):
        threading.Thread.__init__(self)
        self.threadID = threadID
        #self.name = name
        #self.counter = counter
    def run(self):
        print("Starting Thread Number %s"%self.threadID)
        # Get lock to synchronize threads
        #threadLock.acquire()
        #self.status="idle"
        while len(symbols)!=0:
            #self.status="busy"
            self.symbol=symbols.pop(0)
            self.job=subprocess.call(['gnome-terminal','-x','taskset','-c',str(self.threadID),'python3','main_model.py',self.symbol,"800"])
            time.sleep(99999)
            print("Prediction for %s done"%self.symbol)
        print("Job for Thread Number %s done"%self.threadID)
        #print_time(self.name, self.counter, 3)
        # Free lock to release next thread
        #threadLock.release()

def print_time(threadName, delay, counter):
    while counter:
        time.sleep(delay)
        print("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1

#threadLock = threading.Lock()
threads = []


for numberID in range(0,len(symbols)-1,1):
    thread = myThread(numberID)
    thread.start()
    threads.append(thread)
    #threadID += 1
# Create new threads
#thread1 = myThread(1, "Thread-1", 1)
#thread2 = myThread(2, "Thread-2", 2)

# Start new Threads
#thread1.start()
#thread2.start()

# Add threads to thread list
#threads.append(thread1)
#threads.append(thread2)

# Wait for all threads to complete
for t in threads:
    t.join()
print("Exiting Main Thread")
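The dispatcher above pops one symbol per thread and pins each spawned main_model.py run to its own core with taskset; subprocess.call returns as soon as gnome-terminal forks, so time.sleep(99999) parks the thread to keep it from grabbing the next symbol. A minimal alternative sketch (not part of this commit), assuming the same symbols.txt format and the same "800" gap argument, that waits on each worker process directly instead of sleeping:

import subprocess
from concurrent.futures import ThreadPoolExecutor

def run_symbol(core_id, symbol):
    # Pin one worker per core, mirroring the taskset call above.
    return subprocess.call(
        ["taskset", "-c", str(core_id), "python3", "main_model.py", symbol, "800"]
    )

with open("symbols.txt") as f:
    work = [line.strip() for line in f if line.strip()]

N_CORES = 4  # assumption: set to the machine's core count
with ThreadPoolExecutor(max_workers=N_CORES) as pool:
    futures = [pool.submit(run_symbol, i % N_CORES, s) for i, s in enumerate(work)]
    for fut in futures:
        fut.result()  # block until every symbol has been processed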
automatonv2.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import threading
import time
import subprocess

file_symbol=open("symbols.txt","r")
symbols=file_symbol.read().splitlines()
#print(symbols)
symbols = [x.strip(' ') for x in symbols]

#symbols=symbols[:-5]
class myThread (threading.Thread):
    def __init__(self, threadID):
        threading.Thread.__init__(self)
        self.threadID = threadID
        #self.name = name
        #self.counter = counter
    def run(self):
        print("Starting Thread Number %s"%self.threadID)
        # Get lock to synchronize threads
        #threadLock.acquire()
        #self.status="idle"
        while len(symbols)!=0:
            #self.status="busy"
            self.symbol=symbols.pop(0)
            self.job=subprocess.call(['gnome-terminal','-x','taskset','-c',str(self.threadID),'python3','predictor.py',self.symbol,"800"])
            time.sleep(99999)
            print("Prediction for %s done"%self.symbol)
        print("Job for Thread Number %s done"%self.threadID)
        #print_time(self.name, self.counter, 3)
        # Free lock to release next thread
        #threadLock.release()

def print_time(threadName, delay, counter):
    while counter:
        time.sleep(delay)
        print("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1

#threadLock = threading.Lock()
threads = []


for numberID in range(0,len(symbols)-1,1):
    thread = myThread(numberID)
    thread.start()
    threads.append(thread)
    #threadID += 1
# Create new threads
#thread1 = myThread(1, "Thread-1", 1)
#thread2 = myThread(2, "Thread-2", 2)

# Start new Threads
#thread1.start()
#thread2.start()

# Add threads to thread list
#threads.append(thread1)
#threads.append(thread2)

# Wait for all threads to complete
for t in threads:
    t.join()
print("Exiting Main Thread")
backtest.py (new file, 14 lines)
@@ -0,0 +1,14 @@
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.animation as animation
import yfinance as yf

symbol=yf.Ticker("AAPL")
data=symbol.history(interval="1d",period="1y")

close=data["Close"]

print(close)
plt.plot(close)
plt.show()
blast.sh (executable, new file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash

filename="./symbols.txt"
n=1

while read line;
do
    echo "$line"
    taskset -c $((n-1)) python3 ./main_model.py $line 50 &
    n=$((n+1))
done < $filename
check_gap.py (new file, 9 lines)
@@ -0,0 +1,9 @@
import json

f = open("./Samples/MELI/2021-01-30/MELI0.vezpal2")

a = json.load(f)

print(len(a[0]))
print(len(a[1]))
print(len(a[2]))
djangoBackup (binary, new file, not shown)
djangoBackup.tar.gz (binary, new file, not shown)
indicator_MACD.py (new file, 16 lines)
@@ -0,0 +1,16 @@
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt

def macd_data(data):
    tick_history = data
    tick_macd = pd.DataFrame()
    tick_macd['Close'] = tick_history['Close']
    tick_macd['EMA-12'] = tick_macd['Close'].ewm(span=12, adjust=False).mean()
    tick_macd['EMA-26'] = tick_macd['Close'].ewm(span=26, adjust=False).mean()
    tick_macd['MACD'] = tick_macd['EMA-12'] - tick_macd['EMA-26']
    tick_macd['Signal'] = tick_macd['MACD'].ewm(span=9, adjust=False).mean()
    tick_macd['Gap'] = tick_macd['MACD'] - tick_macd['Signal']
    print(tick_macd.index)

    return tick_macd
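macd_data computes the standard MACD: the 12-period EMA of the close minus the 26-period EMA, a 9-period EMA of that difference as the signal line, and their difference (Gap) as the histogram. A minimal usage sketch, assuming indicator_MACD.py is importable from the working directory:

import yfinance as yf
from indicator_MACD import macd_data

# Any history frame with a 'Close' column works as input.
history = yf.Ticker("AAPL").history(interval="1d", period="1y")
macd = macd_data(history)
print(macd[["MACD", "Signal", "Gap"]].tail())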
loss_animation.mp4 (binary, new file, not shown)
main_model.py (new file, 615 lines)
@@ -0,0 +1,615 @@
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
from datetime import timedelta
from tqdm import tqdm
sns.set()
import json
tf.compat.v1.random.set_random_seed(1234)
from matplotlib import style
# import matplotlib.backends.backend_qt5agg
# %matplotlib qt
style.use('ggplot')
import math
import yfinance as yf
import time
from datetime import date, timedelta
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import cProfile
import pstats

# In[34]:

tf.compat.v1.disable_eager_execution()
# INITIAL VARS
test_size = 14
simulation_size = 1

# MODEL VARS
num_layers = 2
size_layer = 128
timestamp = 7
epoch = 20
dropout_rate = 0.8
prediction_gap = sys.argv[2]
future_day = test_size
learning_rate = 0.01
graph_loss = []

# In[35]:
# Necessary Dirs
# def date_manage(date1,date2=None):
#     if date2 is None:
#         date2=date1+timedelta(days=365)
#     date_col=[]
#     for n in range(int ((date2 - date1).days)+1):
#         date_col.append(date1 + timedelta(n))
#     weekdays = [5,6]
#     date_result=[]
#     for dt in date_col:
#         if dt.weekday() not in weekdays:
#             dt.strftime("%Y-%m-%d")
#     return date_result


# In[36]:
def loss_animate(ax, i):
    json_loss = pd.DataFrame(total_loss)
    ax.plot(i)
    return ax


def loader(symbol,test_size,date):
    # dateparse = lambda dates : pd.datetime.strptime(dates,'%Y-%m')
    # df = pd.read_csv('../dataset/IBMCUT.csv',parse_dates=['Date'], index_col = 'Date', date_parser=dateparse)

    df=yf.Ticker(symbol)
    # df=df.history(period="1y",interval="1d")
    # df=df.history(start=date-timedelta(days=365),end=date,interval="1d")
    df=df.history(start=date-timedelta(days=365*3),end=date,interval="1d")
    df=df.reset_index(level=0)

    # df=df.drop(columns=['Dividends'], axis=1)
    df=df.drop(columns=['Stock Splits'], axis=1)

    df['Up'] = df['High'].ewm(span=6,adjust=False).mean() + 2* df['High'].rolling(window=6).std()
    df['Down']= df['Low'].ewm(span=8,adjust=False).mean() - 2* df['Low'].rolling(window=8).std()

    df=df.dropna()
    df=df.drop(df.tail(7).index)

    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    # for i in range(test_size):
    #     #date_ori.append(date_ori[-1] + timedelta(days = 1))
    #     add=1
    #     while ((date_ori[-1]) + timedelta(days = add)).weekday() in [5,6]:
    #         add=add+1
    #     date_ori.append(date_ori[-1] + timedelta(days = add))
    date_ori = pd.Series(date_ori).dt.strftime(date_format = '%Y-%m-%d').tolist()
    print(len(df),len(date_ori))
    return df,date_ori

def trueloader(symbol,test_size,date):
    #df2 = pd.read_csv(symbol)
    #print("LENDF2:",len(df2))

    df2 = yf.Ticker(symbol)
    # df2 = df2.history(start=date-timedelta(days=365),end=date,interval="1d")
    df2 = df2.history(start=date-timedelta(days=365*3),end=date,interval="1d")
    df2 = df2.reset_index(level=0)
    df2 = df2.drop(columns=['Dividends'], axis=1)
    df2 = df2.drop(columns=['Stock Splits'], axis=1)
    df2 = df2.drop(df2.head(7).index)
    # df2 = df2.drop(df2.tail(test_size).index)

    return df2

# In[38]:

def preproc(df):
    minmax = MinMaxScaler().fit(df.iloc[:,1:9].astype('float32')) # Close, Volume, and all
    df_log = minmax.transform(df.iloc[:, 1:9].astype('float32')) # Close, Volume, and all
    df_log = pd.DataFrame(df_log)
    df_log.head()
    return df_log,minmax
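# A minimal self-contained sketch of the scaling round-trip that preproc()
# relies on (illustrative, standalone example values, not data from this
# project): the fitted MinMaxScaler must be kept alongside the scaled frame,
# because forecast() below maps predictions back to price space with
# minmax.inverse_transform.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

_prices = np.array([[10.0], [12.5], [11.0], [14.0]], dtype='float32')
_scaler = MinMaxScaler().fit(_prices)
_scaled = _scaler.transform(_prices)                              # values in [0, 1]
assert np.allclose(_scaler.inverse_transform(_scaled), _prices)   # lossless round-trip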
# In[39]:


class Model:
    def __init__(
        self,
        learning_rate,
        num_layers,
        size,
        size_layer,
        output_size,
        forget_bias = 0.1,
    ):
        def lstm_cell(size_layer):
            #print("ASDasdasdasd",len(tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False))
            return tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False)

        rnn_cells = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(size_layer) for _ in range(num_layers)],
            state_is_tuple = False,
        )
        self.X = tf.compat.v1.placeholder(tf.float32, (None, None, size))
        self.Y = tf.compat.v1.placeholder(tf.float32, (None, output_size))
        # self.X = tf.keras.Input((None, size),dtype=tf.float32)
        # self.Y = tf.keras.Input((output_size),dtype=tf.float32)
        drop = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
            rnn_cells, output_keep_prob = forget_bias
        )
        # print("XXXX:",self.X)
        # print("XXXX:",self.X.shape)
        # print("XXXX:",self.Y)
        # print("XXXX:",self.Y.shape)
        #print("LOOOASDSDASD")
        self.hidden_layer = tf.compat.v1.placeholder(
            tf.float32, (None, num_layers * 2 * size_layer)
        )
        self.outputs, self.last_state = tf.compat.v1.nn.dynamic_rnn(
            drop, self.X, initial_state = self.hidden_layer, dtype = tf.float32
        )
        #rint("INIDIA",self.outputs)
        self.logits = tf.compat.v1.layers.dense(self.outputs[-1], output_size)
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
            self.cost
        )
        #print("cost:",self.cost)
        #print("cost:",self.optimizer)

def calculate_accuracy(real, predict):
    real = np.array(real) + 1
    predict = np.array(predict) + 1
    percentage = 1 - np.sqrt(np.mean(np.square((real - predict) / real)))
    return percentage * 100

def anchor(signal, weight):
    buffer = []
    last = signal[0]
    for i in signal:
        smoothed_val = last * weight + (1 - weight) * i
        buffer.append(smoothed_val)
        last = smoothed_val
    return buffer
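# Quick illustration of the two helpers above (standalone example values):
# calculate_accuracy() returns 100 * (1 - root-mean-square percentage error)
# over the shifted series, and anchor() is an exponential smoother in which
# `weight` is the persistence of the previous smoothed value.
_real, _pred = [10.0, 11.0, 12.0], [10.5, 10.8, 12.4]
print(calculate_accuracy(_real, _pred))   # about 96.7
print(anchor([1.0, 2.0, 3.0], 0.3))       # [1.0, 1.7, 2.61]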
# In[40]:
def main_train(df_beta, df_train, df, minmax):

    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    sess = tf.compat.v1.Session()
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64,inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    pbar = tqdm(range(500), desc = 'Main train loop')
    for i in pbar:
        init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        for k in range(0, df_train.shape[0] - 1, timestamp):
            index = min(k + timestamp, df_train.shape[0] - 1)
            batch_x = np.expand_dims(
                df_train.iloc[k : index, :].values, axis = 0
            )

            batch_y = df_train.iloc[k + 1 : index + 1, :].values
            #print("BATCH_X:",batch_x)
            #print("BATCH_Y:",batch_y)
            logits, last_state,__,loss = sess.run(
                [modelnn.logits, modelnn.last_state,modelnn.optimizer, modelnn.cost],
                feed_dict = {
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            init_value = last_state
            total_loss.append(loss)
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        # json_loss.to_json("./loss.json")
        graph_loss.append(np.mean(total_loss))

        np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost = np.mean(total_loss), min_acc = np.min(total_acc), mean_acc=np.mean(total_acc))


def forecast(df_beta,df_train,df,minmax):
    # print("DF_BETA:",df_beta)
    # print("DF_TRAIN:",df_train)
    # tf.compat.v1.variable_scope("AAA", reuse=True)
    tf.compat.v1.reset_default_graph()
    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    # print("MODELX: ",modelnn.X)
    # print("MODELY: ",modelnn.Y)
    # print("MODELLayer: ",modelnn.hidden_layer)
    sess = tf.compat.v1.Session()
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64,inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    # print("INI___!:",df_train.shape[0] - 1, timestamp)
    # print(df_train.shape[0])
    pbar = tqdm(range(epoch), desc = 'train loop')
    for i in pbar:
        # init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        for k in range(0, df_train.shape[0] - 1, timestamp):
            init_value = np.zeros((1, num_layers * 2 * size_layer))
            index = min(k + timestamp, df_train.shape[0] - 1)
            batch_x = np.expand_dims(
                df_train.iloc[k : index, :].values, axis = 0
            )

            batch_y = df_train.iloc[k + 1 : index + 1, :].values
            #print("BATCH_X:",batch_x)
            #print("BATCH_Y:",batch_y)
            logits, last_state,__,loss = sess.run(
                [modelnn.logits, modelnn.last_state,modelnn.optimizer, modelnn.cost],
                feed_dict = {
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            #print("BATCHX:",batch_x)
            # print("MODELX: ",modelnn.X)
            # print("MODELY: ",modelnn.Y)
            # print("MODELLayer: ",modelnn.hidden_layer)
            #print("OUTSSS:",len(outs))
            #print("opt:",opt)
            #print("outs1",batch_x[0])
            #print("outs2",outs[1])
            #print("outs3",outs[2])
            #print("outs4",outs[3])
            #input()
            init_value = last_state
            total_loss.append(loss)
            #print("LOGITS:",logits[:, 0])
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        # json_loss.to_json("./loss.json")
        graph_loss.append(np.mean(total_loss))

        np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost = np.mean(total_loss), min_acc = np.min(total_acc), mean_acc=np.mean(total_acc))
    future_day = test_size

    output_predict = np.zeros((df_train.shape[0] + future_day, df_train.shape[1]))
    output_predict[0] = df_train.iloc[0]
    upper_b = (df_train.shape[0] // timestamp) * timestamp
    init_value = np.zeros((1, num_layers * 2 * size_layer))

    for k in range(0, (df_train.shape[0] // timestamp) * timestamp, timestamp):
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(
                    df_train.iloc[k : k + timestamp], axis = 0
                ),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[k + 1 : k + timestamp + 1] = out_logits

    if upper_b != df_train.shape[0]:
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(df_train.iloc[upper_b:], axis = 0),
                modelnn.hidden_layer: init_value,
            },
        )
        output_predict[upper_b + 1 : df_train.shape[0] + 1] = out_logits
        future_day -= 1
        date_ori.append(date_ori[-1] + timedelta(days = 1))

    init_value = last_state

    for i in range(future_day):
        o = output_predict[-future_day - timestamp + i:-future_day + i]
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(o, axis = 0),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[-future_day + i] = out_logits[-1]
        date_ori.append(date_ori[-1] + timedelta(days = 1))

    output_predict = minmax.inverse_transform(output_predict)
    deep_future = anchor(output_predict[:, 0], 0.3)
    sess.close()
    sess.__del__()
    return deep_future
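# The prediction stage of forecast() above, distilled (illustrative sketch):
# after teacher-forced training, future rows are generated recursively, so
# each new day is predicted from a window that already contains earlier
# predictions, and errors compound with the horizon. `step` is a hypothetical
# stand-in for one session run of the LSTM.
def _recursive_sketch(history, step, timestamp=7, future_day=14):
    output = np.vstack([history, np.zeros((future_day, history.shape[1]))])
    for i in range(future_day):
        window = output[-future_day - timestamp + i : -future_day + i]
        output[-future_day + i] = step(window)[-1]   # last output row -> next day
    return output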
# In[41]:


def newaccuration(accepted_results,truetrend):
    hasilutama=0
    indexbagus=0
    truest=0
    predictest=0
    for i,x in enumerate(accepted_results):
        a=x[-(test_size+2):]

        #a=x[:((test_size+2)/2)]
        print("a",a)
        b=truetrend[-(test_size+1):]
        #print("b",b)
        hasil=0
        true=[]
        predict=[]
        for xy in range(1,len((a))):
            if a[xy]<a[xy-1]:
                predict.append("Down")
            else:
                predict.append("Up")
            if b[xy]<b[xy-1]:

                true.append("Down")
            else:
                true.append("Up")

        print(true)
        print(predict)
        for xz in range(len(true)):
            if true[xz]==predict[xz]:
                hasil=hasil+1
        if hasil > hasilutama:
            hasilutama=hasil
            indexbagus=i
            truest=true
            predictest=predict
    salah=[]
    for xz in range(len(truest)):
        if truest[xz]!=predictest[xz]:
            salah.append(xz)
            #if xz!=0:
                #salah.append(xz-1)
    #print("INI:",b)
    print("TRUEST",truest)
    print("predictest",predictest)
    return hasilutama,indexbagus,salah

# In[42]:


def betaforecast(simulationsize,dfx,dftrain,df,df2,minmax):
    results = []
    for i in range(simulationsize):
        forecast_res = forecast(df,dftrain,dfx,minmax)
        results.append(forecast_res)
    accepted_results = []

    while not (np.array(results[0][-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(results[0][-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0:
        print("++++++++++++++++++++++++")
        print("Forecast Recalled...")
        results[0]=forecast(df,dftrain,dfx,minmax)
    return results[0]


# In[43]:


def interval(p1,p2):
    return abs((p1) - (p2))
# In[44]:


def checkaccuracy2(true):
    avg=[]
    for x in range(len(true)-7):
        avg.append(interval(true[x],true[x+1]))
    average=sum(avg) / len(avg)
    return average



# In[45]:


def checkaccuracy(predict,true,filterx, test_size):
    print("True Length: ",len(true))
    print("Predict Length: ",len(predict))

    # avg=[]

    # for x in range(len(true)-5):
    #     avg.append(interval(true[x],predict[x]))
    # average=sum(avg) / len(avg)
    # print("AVG1:",average)
    # print("AVG2:",threshold)

    temp_predict=predict[-test_size:]
    temp_true=true[-test_size:]

    # avg2=interval(max(predict),min(predict))
    count=0
    print("------------------------------------")
    for x in range(test_size):
        # acc_var1 = temp_true[x]-(1/filterx*temp_true[x])
        acc_var1 = temp_true[x]-(filterx/10)
        acc_var2 = temp_predict[x]
        # acc_var3 = temp_true[x]+(1/filterx*temp_true[x])
        acc_var3 = temp_true[x]+(filterx/10)
        acc_condition = acc_var1 <= acc_var2 <= acc_var3
        # print("Var 1 : ",acc_var1)
        # print("Var 2 : ",acc_var2)
        # print("Var 3 : ",acc_var3)
        # print("Day "+str(x+1)+" "+str(int(acc_var1))+" "+str(int(acc_var2))+" "+str(int(acc_var3))+" : ",acc_condition)
        print("Day "+str(x+1)+", Price : "+str(int(temp_true[x]))+" ,Gap = "+str(int(abs(temp_predict[x]-temp_true[x])))+" : ",acc_condition)
        if (acc_condition):
            count=count+1
    print("------------------------------------")
    if count>7:
        print("Result True")
        return True
    else:
        print("Result False")
        return False

    # if average>threshold:
    #     return False
    # else:
    #     return True
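# Worked example of the acceptance rule in checkaccuracy() above (standalone
# example values): the band is +/- filterx/10 around each actual close, and a
# forecast passes once more than 7 of the last `test_size` days land inside
# it. With filterx = 50 (the gap blast.sh passes), the band is +/- 5.0:
_true, _pred, _filterx = [100.0, 102.0, 101.0], [104.0, 96.0, 103.5], 50
_hits = sum(t - _filterx / 10 <= p <= t + _filterx / 10
            for p, t in zip(_pred, _true))
print(_hits)   # 2 of the 3 example days fall inside the band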
# In[46]:


def findthreshold(simulationsize,dfx,dftrain,df,df2,minmax):
    results=[]
    for i in range(simulationsize):
        results.append(forecast(df,dftrain,dfx,minmax))
    accepted_results = []

    for r in results:

        if (np.array(r[-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(r[-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0:
            accepted_results.append(r)

    finalavg=999999
    for o in accepted_results:
        avg=[]
        for x in range(len(o)-5):
            avg.append(interval(o[x],df2[x]))
        average=sum(avg) / len(avg)
        if average<=finalavg:
            finalavg=average

    return finalavg

def temp_data(date, xi, resultfinal, df2, date_col,x):
    print("Called . . . ")
    if os.path.isdir("TempData/") == False:
        os.mkdir("TempData/")
    if os.path.isdir("TempData/%s"%x) == False:
        os.mkdir("TempData/%s"%x)
    if os.path.isdir("TempData/%s/"%x+str(date)) == False:
        os.mkdir("TempData/%s/"%x+str(date))
    with open("TempData/%s/"%x+str(date)+"/"+x+str(xi)+".vezpal2","w+") as oop:
        main=[]
        main.append(resultfinal) #prediction
        main.append(list(df2['Close']))
        main.append(date_col)
        # main.append(3)
        # main.append([0])
        json.dump(main,oop)

def automaton(simulationsize,date):

    # symbols=["AAPL"]
    # symbols = sys.argv[1:]
    symbols = []
    symbols.append(sys.argv[1])
    times=[]
    for x in symbols:
        temp_time=[]
        temp_time.append(x)
        counter=0
        validity=0
        df,date_col=loader(x,test_size,date)
        #print(type(df))
        dfx=df
        #print("ASDSAD")
        df2=trueloader(x,test_size,date)
        df,minmax=preproc(df)
        dftrain=df
        wrong=[1,2,3,4,5]
        #avg=checkaccuracy2(list(df2["Close"]))
        #start=time.time()
        #avg=findthreshold(50,dfx,dftrain,df,list(df2["Close"]),minmax)
        #temp_time.append(time.time()-start)
        start=time.time()
        filterx = int(prediction_gap)
        able=False
        print("============== || Initial Train || =============")
        main_train(df,dftrain,dfx,minmax)
        for xi in range(5):
            decision=False
            while (decision==False):
                print()
                print("====== [ Forecasting Attempt : "+str(counter+1)+" ] ===========")
                print("====== [ Progress : "+str(xi)+"/5 ] ")
                resultfinal=betaforecast(simulationsize,dfx,dftrain,df,df2,minmax)
                #validity=valid
                decision=checkaccuracy(resultfinal,list(df2["Close"]),filterx, test_size)
                #wrong=invalid
                if decision==True:
                    able=True
                    print("ABLE")
                    print(str(filterx))
                if counter > 100 and decision != True:
                    counter = 0
                    filterx=filterx+1
                    print("Filter X relaxed to : "+str(filterx))
                print("Decision Status : ", decision)
                print("**************************************")
                # avg=avg+(1/3*avg)
                if filterx>1000:
                    print("====== [ GG, we gave up] =====")
                    continue
                counter=counter+1
                temp_data(date, xi, resultfinal, df2, date_col, x)
            print("[ Loop : "+x+" done ] =========================")
            print()

            if os.path.isdir("Samples/") == False:
                os.mkdir("Samples/")
            if os.path.isdir("Samples/%s"%x) == False:
                os.mkdir("Samples/%s"%x)
            if os.path.isdir("Samples/%s/"%x+str(date)) == False:
                os.mkdir("Samples/%s/"%x+str(date))
            with open("Samples/%s/"%x+str(date)+"/"+x+str(xi)+".vezpal2","w+") as oop:
                main=[]
                main.append(resultfinal) #prediction
                main.append(list(df2['Close']))
                main.append(date_col)
                # main.append(3)
                # main.append([0])
                json.dump(main,oop)
        print("Time for %s :"%x,time.time()-start)
        temp_time.append(time.time()-start)
        times.append(temp_time)
    return times




current_date=date.today()
if os.path.isdir("Loss/") == False:
    os.mkdir("Loss/")
if os.path.isdir("Loss/"+str(current_date)) == False:
    os.mkdir("Loss/"+str(current_date))

loss_file = time.strftime("%Y%m%d-%H%M%S")
loss_file = "Loss/"+str(date.today())+"/"+loss_file
global_start = time.time()
profile = cProfile.Profile()
main_func = "automaton(simulation_size,current_date)"

ps = pstats.Stats(profile.run(main_func))
print("Overall time consumption ", str(time.time()-global_start))
ps.dump_stats("./Cprofile_model_01.ps")
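The entry point above profiles one full automaton run with cProfile and dumps the stats to Cprofile_model_01.ps (the binary stats dump committed above; a pstats dump, not PostScript, despite the extension). The script expects a symbol and a gap on the command line, e.g. python3 main_model.py AAPL 800, which is how automatonv1.py and blast.sh invoke it. A short sketch for inspecting the dump afterwards:

import pstats

# Load the cProfile dump written by main_model.py and list the hot spots.
stats = pstats.Stats("./Cprofile_model_01.ps")
stats.sort_stats("cumulative").print_stats(10)   # ten most expensive call paths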
main_model_django.py (new file, 616 lines)
@@ -0,0 +1,616 @@
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
from datetime import timedelta
from tqdm import tqdm
sns.set()
import json
tf.compat.v1.random.set_random_seed(1234)
from matplotlib import style
# import matplotlib.backends.backend_qt5agg
# %matplotlib qt
style.use('ggplot')
import math
import yfinance as yf
import time
from datetime import date, timedelta
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import cProfile
import pstats

# In[34]:

tf.compat.v1.disable_eager_execution()
# INITIAL VARS
test_size = 14
simulation_size = 1

# MODEL VARS
num_layers = 2
size_layer = 128
timestamp = 7
epoch = 20
dropout_rate = 0.8
prediction_gap = 5000
future_day = test_size
learning_rate = 0.01
graph_loss = []

# In[35]:
# Necessary Dirs
# def date_manage(date1,date2=None):
#     if date2 is None:
#         date2=date1+timedelta(days=365)
#     date_col=[]
#     for n in range(int ((date2 - date1).days)+1):
#         date_col.append(date1 + timedelta(n))
#     weekdays = [5,6]
#     date_result=[]
#     for dt in date_col:
#         if dt.weekday() not in weekdays:
#             dt.strftime("%Y-%m-%d")
#     return date_result


# In[36]:
def loss_animate(ax, i):
    json_loss = pd.DataFrame(total_loss)
    ax.plot(i)
    return ax


def loader(symbol,test_size,date):
    # dateparse = lambda dates : pd.datetime.strptime(dates,'%Y-%m')
    # df = pd.read_csv('../dataset/IBMCUT.csv',parse_dates=['Date'], index_col = 'Date', date_parser=dateparse)

    df=yf.Ticker(symbol)
    # df=df.history(period="1y",interval="1d")
    # df=df.history(start=date-timedelta(days=365),end=date,interval="1d")
    df=df.history(start=date-timedelta(days=365*3),end=date,interval="1d")
    df=df.reset_index(level=0)

    # df=df.drop(columns=['Dividends'], axis=1)
    df=df.drop(columns=['Stock Splits'], axis=1)

    df['Up'] = df['High'].ewm(span=6,adjust=False).mean() + 2* df['High'].rolling(window=6).std()
    df['Down']= df['Low'].ewm(span=8,adjust=False).mean() - 2* df['Low'].rolling(window=8).std()

    df=df.dropna()
    df=df.drop(df.tail(7).index)

    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    # for i in range(test_size):
    #     #date_ori.append(date_ori[-1] + timedelta(days = 1))
    #     add=1
    #     while ((date_ori[-1]) + timedelta(days = add)).weekday() in [5,6]:
    #         add=add+1
    #     date_ori.append(date_ori[-1] + timedelta(days = add))
    date_ori = pd.Series(date_ori).dt.strftime(date_format = '%Y-%m-%d').tolist()
    print(len(df),len(date_ori))
    return df,date_ori

def trueloader(symbol,test_size,date):
    #df2 = pd.read_csv(symbol)
    #print("LENDF2:",len(df2))

    df2 = yf.Ticker(symbol)
    # df2 = df2.history(start=date-timedelta(days=365),end=date,interval="1d")
    df2 = df2.history(start=date-timedelta(days=365*3),end=date,interval="1d")
    df2 = df2.reset_index(level=0)
    df2 = df2.drop(columns=['Dividends'], axis=1)
    df2 = df2.drop(columns=['Stock Splits'], axis=1)
    df2 = df2.drop(df2.head(7).index)
    # df2 = df2.drop(df2.tail(test_size).index)

    return df2

# In[38]:

def preproc(df):
    minmax = MinMaxScaler().fit(df.iloc[:,1:9].astype('float32')) # Close, Volume, and all
    df_log = minmax.transform(df.iloc[:, 1:9].astype('float32')) # Close, Volume, and all
    df_log = pd.DataFrame(df_log)
    df_log.head()
    return df_log,minmax


# In[39]:


class Model:
    def __init__(
        self,
        learning_rate,
        num_layers,
        size,
        size_layer,
        output_size,
        forget_bias = 0.1,
    ):
        def lstm_cell(size_layer):
            #print("ASDasdasdasd",len(tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False))
            return tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False)

        rnn_cells = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(size_layer) for _ in range(num_layers)],
            state_is_tuple = False,
        )
        self.X = tf.compat.v1.placeholder(tf.float32, (None, None, size))
        self.Y = tf.compat.v1.placeholder(tf.float32, (None, output_size))
        # self.X = tf.keras.Input((None, size),dtype=tf.float32)
        # self.Y = tf.keras.Input((output_size),dtype=tf.float32)
        drop = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
            rnn_cells, output_keep_prob = forget_bias
        )
        # print("XXXX:",self.X)
        # print("XXXX:",self.X.shape)
        # print("XXXX:",self.Y)
        # print("XXXX:",self.Y.shape)
        #print("LOOOASDSDASD")
        self.hidden_layer = tf.compat.v1.placeholder(
            tf.float32, (None, num_layers * 2 * size_layer)
        )
        self.outputs, self.last_state = tf.compat.v1.nn.dynamic_rnn(
            drop, self.X, initial_state = self.hidden_layer, dtype = tf.float32
        )
        #rint("INIDIA",self.outputs)
        self.logits = tf.compat.v1.layers.dense(self.outputs[-1], output_size)
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
            self.cost
        )
        #print("cost:",self.cost)
        #print("cost:",self.optimizer)

def calculate_accuracy(real, predict):
    real = np.array(real) + 1
    predict = np.array(predict) + 1
    percentage = 1 - np.sqrt(np.mean(np.square((real - predict) / real)))
    return percentage * 100

def anchor(signal, weight):
    buffer = []
    last = signal[0]
    for i in signal:
        smoothed_val = last * weight + (1 - weight) * i
        buffer.append(smoothed_val)
        last = smoothed_val
    return buffer


# In[40]:
def main_train(df_beta, df_train, df, minmax):

    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    sess = tf.compat.v1.Session()
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64,inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    pbar = tqdm(range(10), desc = 'Main train loop')
    for i in pbar:
        init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        for k in range(0, df_train.shape[0] - 1, timestamp):
            index = min(k + timestamp, df_train.shape[0] - 1)
            batch_x = np.expand_dims(
                df_train.iloc[k : index, :].values, axis = 0
            )

            batch_y = df_train.iloc[k + 1 : index + 1, :].values
            #print("BATCH_X:",batch_x)
            #print("BATCH_Y:",batch_y)
            logits, last_state,__,loss = sess.run(
                [modelnn.logits, modelnn.last_state,modelnn.optimizer, modelnn.cost],
                feed_dict = {
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            init_value = last_state
            total_loss.append(loss)
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        # json_loss.to_json("./loss.json")
        graph_loss.append(np.mean(total_loss))

        np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost = np.mean(total_loss), min_acc = np.min(total_acc), mean_acc=np.mean(total_acc))
    tf.compat.v1.reset_default_graph()


def forecast(df_beta,df_train,df,minmax):
    # print("DF_BETA:",df_beta)
    # print("DF_TRAIN:",df_train)
    # tf.compat.v1.variable_scope("AAA", reuse=True)
    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    # print("MODELX: ",modelnn.X)
    # print("MODELY: ",modelnn.Y)
    # print("MODELLayer: ",modelnn.hidden_layer)
    sess = tf.compat.v1.Session()
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64,inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    # print("INI___!:",df_train.shape[0] - 1, timestamp)
    # print(df_train.shape[0])
    pbar = tqdm(range(epoch), desc = 'train loop')
    for i in pbar:
        # init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        for k in range(0, df_train.shape[0] - 1, timestamp):
            init_value = np.zeros((1, num_layers * 2 * size_layer))
            index = min(k + timestamp, df_train.shape[0] - 1)
            batch_x = np.expand_dims(
                df_train.iloc[k : index, :].values, axis = 0
            )

            batch_y = df_train.iloc[k + 1 : index + 1, :].values
            #print("BATCH_X:",batch_x)
            #print("BATCH_Y:",batch_y)
            logits, last_state,__,loss = sess.run(
                [modelnn.logits, modelnn.last_state,modelnn.optimizer, modelnn.cost],
                feed_dict = {
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            #print("BATCHX:",batch_x)
            # print("MODELX: ",modelnn.X)
            # print("MODELY: ",modelnn.Y)
            # print("MODELLayer: ",modelnn.hidden_layer)
            #print("OUTSSS:",len(outs))
            #print("opt:",opt)
            #print("outs1",batch_x[0])
            #print("outs2",outs[1])
            #print("outs3",outs[2])
            #print("outs4",outs[3])
            #input()
            init_value = last_state
            total_loss.append(loss)
            #print("LOGITS:",logits[:, 0])
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        # json_loss.to_json("./loss.json")
        graph_loss.append(np.mean(total_loss))

        np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost = np.mean(total_loss), min_acc = np.min(total_acc), mean_acc=np.mean(total_acc))
    future_day = test_size

    output_predict = np.zeros((df_train.shape[0] + future_day, df_train.shape[1]))
    output_predict[0] = df_train.iloc[0]
    upper_b = (df_train.shape[0] // timestamp) * timestamp
    init_value = np.zeros((1, num_layers * 2 * size_layer))

    for k in range(0, (df_train.shape[0] // timestamp) * timestamp, timestamp):
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(
                    df_train.iloc[k : k + timestamp], axis = 0
                ),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[k + 1 : k + timestamp + 1] = out_logits

    if upper_b != df_train.shape[0]:
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(df_train.iloc[upper_b:], axis = 0),
                modelnn.hidden_layer: init_value,
            },
        )
        output_predict[upper_b + 1 : df_train.shape[0] + 1] = out_logits
        future_day -= 1
        date_ori.append(date_ori[-1] + timedelta(days = 1))

    init_value = last_state

    for i in range(future_day):
        o = output_predict[-future_day - timestamp + i:-future_day + i]
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(o, axis = 0),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[-future_day + i] = out_logits[-1]
        date_ori.append(date_ori[-1] + timedelta(days = 1))

    output_predict = minmax.inverse_transform(output_predict)
    deep_future = anchor(output_predict[:, 0], 0.3)
    sess.close()
    sess.__del__()
    tf.compat.v1.reset_default_graph()
    return deep_future


# In[41]:


def newaccuration(accepted_results,truetrend):
    hasilutama=0
    indexbagus=0
    truest=0
    predictest=0
    for i,x in enumerate(accepted_results):
        a=x[-(test_size+2):]

        #a=x[:((test_size+2)/2)]
        print("a",a)
        b=truetrend[-(test_size+1):]
        #print("b",b)
        hasil=0
        true=[]
        predict=[]
        for xy in range(1,len((a))):
            if a[xy]<a[xy-1]:
                predict.append("Down")
            else:
                predict.append("Up")
            if b[xy]<b[xy-1]:

                true.append("Down")
            else:
                true.append("Up")

        print(true)
        print(predict)
        for xz in range(len(true)):
            if true[xz]==predict[xz]:
                hasil=hasil+1
        if hasil > hasilutama:
            hasilutama=hasil
            indexbagus=i
            truest=true
            predictest=predict
    salah=[]
    for xz in range(len(truest)):
        if truest[xz]!=predictest[xz]:
            salah.append(xz)
            #if xz!=0:
                #salah.append(xz-1)
    #print("INI:",b)
    print("TRUEST",truest)
    print("predictest",predictest)
    return hasilutama,indexbagus,salah

# In[42]:


def betaforecast(simulationsize,dfx,dftrain,df,df2,minmax):
    results = []
    for i in range(simulationsize):
        forecast_res = forecast(df,dftrain,dfx,minmax)
        results.append(forecast_res)
    accepted_results = []

    while not (np.array(results[0][-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(results[0][-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0:
        print("++++++++++++++++++++++++")
        print("Forecast Recalled...")
        results[0]=forecast(df,dftrain,dfx,minmax)
    return results[0]


# In[43]:


def interval(p1,p2):
    return abs((p1) - (p2))


# In[44]:


def checkaccuracy2(true):
    avg=[]
    for x in range(len(true)-7):
        avg.append(interval(true[x],true[x+1]))
    average=sum(avg) / len(avg)
    return average



# In[45]:


def checkaccuracy(predict,true,filterx, test_size):
    print("True Length: ",len(true))
    print("Predict Length: ",len(predict))

    # avg=[]

    # for x in range(len(true)-5):
    #     avg.append(interval(true[x],predict[x]))
    # average=sum(avg) / len(avg)
    # print("AVG1:",average)
    # print("AVG2:",threshold)

    temp_predict=predict[-test_size:]
    temp_true=true[-test_size:]

    # avg2=interval(max(predict),min(predict))
    count=0
    print("------------------------------------")
    for x in range(test_size):
        # acc_var1 = temp_true[x]-(1/filterx*temp_true[x])
        acc_var1 = temp_true[x]-(filterx/10)
        acc_var2 = temp_predict[x]
        # acc_var3 = temp_true[x]+(1/filterx*temp_true[x])
        acc_var3 = temp_true[x]+(filterx/10)
        acc_condition = acc_var1 <= acc_var2 <= acc_var3
        # print("Var 1 : ",acc_var1)
        # print("Var 2 : ",acc_var2)
        # print("Var 3 : ",acc_var3)
        # print("Day "+str(x+1)+" "+str(int(acc_var1))+" "+str(int(acc_var2))+" "+str(int(acc_var3))+" : ",acc_condition)
        print("Day "+str(x+1)+", Price : "+str(int(temp_true[x]))+" ,Gap = "+str(int(abs(temp_predict[x]-temp_true[x])))+" : ",acc_condition)
        if (acc_condition):
            count=count+1
    print("------------------------------------")
    if count>7:
        print("Result True")
        return True
    else:
        print("Result False")
        return False

    # if average>threshold:
    #     return False
    # else:
    #     return True


# In[46]:


def findthreshold(simulationsize,dfx,dftrain,df,df2,minmax):
    results=[]
    for i in range(simulationsize):
        results.append(forecast(df,dftrain,dfx,minmax))
    accepted_results = []

    for r in results:

        if (np.array(r[-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(r[-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0:
            accepted_results.append(r)

    finalavg=999999
    for o in accepted_results:
        avg=[]
        for x in range(len(o)-5):
            avg.append(interval(o[x],df2[x]))
        average=sum(avg) / len(avg)
        if average<=finalavg:
            finalavg=average

    return finalavg

def temp_data(date, xi, resultfinal, df2, date_col,x):
    print("Called . . . ")
    if os.path.isdir("TempData/") == False:
        os.mkdir("TempData/")
    if os.path.isdir("TempData/%s"%x) == False:
        os.mkdir("TempData/%s"%x)
    if os.path.isdir("TempData/%s/"%x+str(date)) == False:
        os.mkdir("TempData/%s/"%x+str(date))
    with open("TempData/%s/"%x+str(date)+"/"+x+str(xi)+".vezpal2","w+") as oop:
        main=[]
        main.append(resultfinal) #prediction
        main.append(list(df2['Close']))
        main.append(date_col)
        # main.append(3)
        # main.append([0])
        json.dump(main,oop)

def automaton(simulationsize,date, ticker):

    # symbols=["AAPL"]
    # symbols = sys.argv[1:]
    symbols = []
    symbols.append(ticker)
    times=[]
    for x in symbols:
        temp_time=[]
        temp_time.append(x)
        counter=0
        validity=0
        df,date_col=loader(x,test_size,date)
        #print(type(df))
        dfx=df
        #print("ASDSAD")
        df2=trueloader(x,test_size,date)
        df,minmax=preproc(df)
        dftrain=df
        wrong=[1,2,3,4,5]
        #avg=checkaccuracy2(list(df2["Close"]))
        #start=time.time()
        #avg=findthreshold(50,dfx,dftrain,df,list(df2["Close"]),minmax)
        #temp_time.append(time.time()-start)
        start=time.time()
        filterx = int(prediction_gap)
        able=False
        print("============== || Initial Train || =============")
        main_train(df,dftrain,dfx,minmax)
        for xi in range(5):
            decision=False
            while (decision==False):
                print()
                print("====== [ Forecasting Attempt : "+str(counter+1)+" ] ===========")
                print("====== [ Progress : "+str(xi)+"/5 ] ")
                resultfinal=betaforecast(simulationsize,dfx,dftrain,df,df2,minmax)
                #validity=valid
                decision=checkaccuracy(resultfinal,list(df2["Close"]),filterx, test_size)
                # wrong=invalid
                if decision==True:
                    able=True
                    print("ABLE")
                    print(str(filterx))
                if counter > 100 and decision != True:
                    counter = 0
                    filterx=filterx+1
                    print("Filter X relaxed to : "+str(filterx))
                print("Decision Status : ", decision)
                print("**************************************")
                # avg=avg+(1/3*avg)
                if filterx>1000:
                    print("====== [ GG, we gave up] =====")
                    continue
                counter=counter+1
                temp_data(date, xi, resultfinal, df2, date_col, x)
            print("[ Loop : "+x+" done ] =========================")
            print()

            if os.path.isdir("Samples/") == False:
                os.mkdir("Samples/")
            if os.path.isdir("Samples/%s"%x) == False:
                os.mkdir("Samples/%s"%x)
            if os.path.isdir("Samples/%s/"%x+str(date)) == False:
                os.mkdir("Samples/%s/"%x+str(date))
            with open("Samples/%s/"%x+str(date)+"/"+x+str(xi)+".vezpal2","w+") as oop:
                main=[]
                main.append(resultfinal) #prediction
                main.append(list(df2['Close']))
                main.append(date_col)
                # main.append(3)
                # main.append([0])
                json.dump(main,oop)
        print("Time for %s :"%x,time.time()-start)
        temp_time.append(time.time()-start)
        times.append(temp_time)
    return times

def django_call(ticker):

    # tf.compat.v1.reset_default_graph()
    automaton(simulation_size, current_date, ticker)

    return "Done"

current_date=date.today()

if os.path.isdir("Loss/") == False:
    os.mkdir("Loss/")
if os.path.isdir("Loss/"+str(current_date)) == False:
    os.mkdir("Loss/"+str(current_date))

loss_file = time.strftime("%Y%m%d-%H%M%S")
loss_file = "Loss/"+str(date.today())+"/"+loss_file

# ticker = input("AAA : ")
# django_call(ticker)
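main_model_django.py mirrors main_model.py but drops the command-line and cProfile entry point: prediction_gap is hard-coded to 5000, the initial training loop runs 10 iterations instead of 500, the default graph is reset between runs, and django_call(ticker) is exposed for a web backend. A hypothetical Django view wiring it up (the view name and wiring are assumptions, not part of this commit):

# views.py (hypothetical)
from django.http import HttpResponse
from main_model_django import django_call

def predict(request, ticker):
    # Runs synchronously; a real deployment would hand this off to a worker queue.
    return HttpResponse(django_call(ticker))   # responds "Done" when finished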
multiplot.py (new file, 156 lines)
@@ -0,0 +1,156 @@
import matplotlib.pyplot as plt
import json
from pylab import *

import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np
# path='../Febrian/Bang Nino/Samples/NEE/2020-05-08/'
def return_plot(plotax,ticks,filters):
    #my_args = sys.argv[1:]
    #tick = my_args[0]
    #filter=int(my_args[1])
    tick=ticks
    filter=filters
    visual=100
    print(tick)
    # path='./Samples/'+tick+'/2020-12-07/'
    path='./Samples/'+tick+'/'
    all_dirs = glob.glob(path+"*")
    print(path)
    print(all_dirs)
    latest_dir = max(all_dirs, key=os.path.getctime)
    print("Latest Data = ", latest_dir)
    latest_dir = latest_dir + "/"
    listsymbol = os.listdir(latest_dir+'/')
    print(listsymbol)

    with open(latest_dir+listsymbol[0],'r') as f:
        print(latest_dir+listsymbol[0])
        hasil=json.load(f)
        # print(hasil)
        print(len(hasil[0]))
        print(len(hasil[1]))
        print(len(hasil[2]))
        akhir=datetime.strptime(str(hasil[2][-1]),'%Y-%m-%d').date()
        print("Current Log : ",akhir)
        extend1=[akhir+timedelta(days=x) for x in range(1,50,1)]
        extend=[]

        for x in extend1:
            if x.weekday() not in [5,6]:
                extend.append(x)

        extend=extend[:14]
        extend=[str(x) for x in extend]
        print("ASasdasdsa",extend)
        # date=date2num(date)
        date=hasil[2]
        plotax.plot(hasil[1],color='grey',linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
        plotax.plot(hasil[2],hasil[1][0:-5],color='b',linewidth=2)
        # plt.show()

    symbol=yf.Ticker(tick)
    symbol=symbol.history(start=akhir,end=akhir+timedelta(days=30),interval='1d')
    # print(symbol)
    symbol=symbol.drop(symbol.index[0])
    symbol=symbol.drop(symbol.index[0])
    symbol=symbol['Close'][0:14].tolist()
    # plt.plot(extend,symbol,color='g',linewidth=10)
    avg=[]
    for ex,x in enumerate(listsymbol):
        with open(latest_dir+x) as f:
            print("#################################################")
            print(x)
            hasil=json.load(f)
            # a=hasil[0][-14:][:14]
            a = hasil[0][-14:][:7]
            b=hasil[1][-7:]
            print("A & B Temp :")
            print(a)
            print(b)
            count=0
            print("Predict - Real ")
            for x in range(7):
                print("%.2f" % (a[x]-b[x]))
                if (b[x]-(filter/10) <= a[x] <= b[x]+(filter/10)):
                    count=count+1
            if count>5:
                print("ACCEPTED . . .")
                predict_val = hasil[0][-14:]
                avg.append(predict_val)
                # avg.append(hasil[0][-10][:14])
                # plt.plot(date+extend,hasil[0], label='Sample %s'%ex, alpha=0.3)
                print()
            else:
                print("NOT ACCEPTED . . .")
                print()
    # print(avg)
    print("#################################################")
    print(avg)
    print(len(avg))
    print("#################################################")
    avg_total=[]
    for x in range(len(extend)):
        temp=[]
        for a in range(len(avg)):
            temp.append(avg[a][x])
        # avg_total.append(sum(temp)/len(temp))
        mean_pred = np.mean(temp)
        print(mean_pred)
        avg_total.append(mean_pred)

    # print(avg_total)

    atas=[x+(1/visual*x) for x in avg_total]
    bawah=[x-(1/visual*x) for x in avg_total]
    print([hasil[1][0:-5][-1]]+bawah)
    print([hasil[1][0:-5][-1]]+atas)
    print([date[-1]]+extend)
    plotax.fill_between([date[-1]]+extend,[hasil[1][0:-5][-1]]+bawah,[hasil[1][0:-5][-1]]+atas,alpha=0.2,label='Prediction Band')
    plotax.plot([date[-1]]+extend,[hasil[1][-5]]+avg_total,color='y',linewidth=1,label='Prediction', marker='x')
    plotax.grid()
    # plt.show()

    # print("Date Extended ",(date+extend))
    symbol = yf.Ticker(tick)
    symbol = symbol.history(start=akhir,end=akhir+timedelta(days=20),interval='1d')
    print(symbol)
    symbol = symbol.drop(symbol.index[0])
    symbol = symbol.drop(symbol.index[0])
    symbol = symbol['Close'][0:14].tolist()
    # symbol = symbol['Close'].tolist()
    # print(symbol)
    # plt.plot(extend[0:len(symbol)],symbol,color='g',label='Actual',linewidth=1)
    # plt.plot(hasil[1],color='r', linestyle='--', label='Confirmation',linewidth=2)
    # plt.plot(hasil[2]+extend[0:14],hasil[0],color='b',label='Train',linewidth=2, alpha=0.4)
    # plt.plot(symbol, label="Symbol Real")
    # plt.plot(hasil[0], label="Real Prediction")
    # print(symbol)
    # print(symbol)
    # detail = str(akhir)+"\n"+"Prediction :"+str(avg_total[-1:])+"\n"+"Real : "
    # plt.text(0.05, 120, detail, color='black', bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
|
plotax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.01),
|
||||||
|
fancybox=True, shadow=True, ncol=7)
|
||||||
|
plotax.title.set_text(ticks)
|
||||||
|
#plotax.title(tick+" Date: "+str(hasil[2][-1])+" to "+str(extend[-1]))
|
||||||
|
# plt.get_xaxis().set_ticks([])
|
||||||
|
#plotax.xticks([])
|
||||||
|
return plotax
|
||||||
|
|
||||||
|
|
||||||
|
#subplots_adjust(hspace=0.000)
|
||||||
|
#symbolss=os.listdir("Samples")
|
||||||
|
symbolss=["GOOGL","FB","MA","SHOP","NEE"]
|
||||||
|
#number_of_subplots=len(os.listdir("Samples"))
|
||||||
|
number_of_subplots=5
|
||||||
|
for i,v in enumerate(range(number_of_subplots)):
|
||||||
|
v = v+1
|
||||||
|
ax1 = subplot(number_of_subplots,1,v)
|
||||||
|
ax1 = return_plot(ax1,symbolss.pop(0),900)
|
||||||
|
plt.tight_layout()
|
||||||
|
plt.show()
|
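The accept/reject rule used above (and in the plotter scripts below) reduces to a small predicate. A condensed sketch for reference; the function name is illustrative, and needed=6 mirrors the `count > 5` test:

def sample_accepted(pred7, real7, filters, needed=6):
    # Accept when enough of the 7 compared days land within +/- filters/10
    band = filters / 10
    hits = sum(1 for p, r in zip(pred7, real7) if r - band <= p <= r + band)
    return hits >= needed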
29	parser.py	Normal file
@@ -0,0 +1,29 @@
#!/bin/python3

import json
import numpy as np
import matplotlib.pyplot as plt

# Quick visual check of one saved sample file
file_json = open("./Febrian_sample/AAPL/2020-05-08/AAPL0.vezpal2")
data = json.loads(file_json.read())

fill_zero = [0, 0, 0, 0, 0]
print(len(data))
x = data[0]
y = data[1]  # +fill_zero
z = data[2]  # +(fill_zero*2)

print(len(x))
print(len(y))
print(len(z))
plt.xticks(rotation=90)
# plt.plot(z, y)
# plt.plot(z, x)
plt.plot(x)
plt.plot(y)
plt.show()
178	plot_end.py	Normal file
@@ -0,0 +1,178 @@
import matplotlib.pyplot as plt
import json
import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np
from collections import Counter

# path='../Febrian/Bang Nino/Samples/NEE/2020-05-08/'

my_args = sys.argv[1:]
tick = my_args[0]
filter = int(my_args[1])   # acceptance window, in tenths of a price unit
visual = 50
print(tick)

# Most recent sample directory for this ticker
path = './Samples/' + tick + '/'
all_dirs = glob.glob(path + "*")
print(path)
print(all_dirs)
latest_dir = max(all_dirs, key=os.path.getctime)
print("Latest Data = ", latest_dir)
latest_dir = latest_dir + "/"
listsymbol = os.listdir(latest_dir)
print(listsymbol)

with open(latest_dir + listsymbol[0], 'r') as f:
    print(latest_dir + listsymbol[0])
    hasil = json.load(f)
print(len(hasil[0]))
print(len(hasil[1]))
print(len(hasil[2]))
akhir = datetime.strptime(str(hasil[2][-1]), '%Y-%m-%d').date()
print("Current Log : ", akhir)

# The next 14 weekdays after the last logged date
extend1 = [akhir + timedelta(days=x) for x in range(1, 50, 1)]
extend = []
for x in extend1:
    if x.weekday() not in [5, 6]:
        extend.append(x)
extend = extend[:14]
extend = [str(x) for x in extend]
print("Extended dates:", extend)

date = hasil[2]   # note: shadows datetime.date
plt.plot(hasil[2][-10:], hasil[1][0:-7][-10:], color='b', linewidth=2)

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=30), interval='1d')
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

# Accept a sample when at least 5 of its last 7 predictions fall inside
# the +/- filter/10 band, and record its per-day absolute gaps
avg = []
gap_avg = []
for ex, x in enumerate(listsymbol):
    gap_val = []
    with open(latest_dir + x) as f:
        print("#################################################")
        print(x)
        hasil = json.load(f)
        a = hasil[0][-14:][:7]
        b = hasil[1][-7:]
        print("A & B Temp :")
        print(a)
        print(b)
        count = 0
        print("Predict - Real ")
        for k in range(len(b)):   # renamed from x to avoid shadowing the filename
            print("%.2f" % (a[k] - b[k]))
            if (b[k] - (filter / 10) <= a[k] <= b[k] + (filter / 10)):
                count = count + 1
        if count > 4:
            print("ACCEPTED . . .")
            for i in range(len(b)):
                gap_val.append(abs(a[i] - b[i]))
            print(len(gap_val))
            predict_val = hasil[0][-14:]
            avg.append(predict_val)
            plt.plot((date + extend)[-20:], hasil[0][-20:], label='Sample %s' % ex, alpha=0.3)
            gap_avg.append(gap_val)
            print()
        else:
            print("NOT ACCEPTED . . .")
            print()

print("#################################################")
print(avg)
print(len(avg))
print(gap_avg)
print(len(gap_avg))
print("#################################################")

# For each of the first 7 days pick the sample with the smallest gap;
# past that, fall back to the sample chosen most often so far
avg_total = []
good_index = []
for x in range(len(extend)):
    print("NOW ", x)
    gap_temp = []
    if x < 7:
        for a in range(len(gap_avg)):
            gap_temp.append(gap_avg[a][x])
        print("Gap Temp")
        print(gap_temp)
        good_index.append(gap_temp.index(min(gap_temp)))
    else:
        c = Counter(good_index)
        c = c.most_common(1)
        good_index.append(c[0][0])

print("Good Index")
print(len(good_index))
print(good_index)
print()
temp = []
for c in range(len(extend)):
    print(c)
    temp.append(avg[int(good_index[c])][c])
    print(temp)
avg_total = temp
print(avg_total)

atas = [x + (1 / visual * x) for x in avg_total]
bawah = [x - (1 / visual * x) for x in avg_total]
print([hasil[1][0:-5][-1]] + bawah)
print([hasil[1][0:-5][-1]] + atas)
print([date[-1]] + extend)
plt.fill_between([date[-1]] + extend, [hasil[1][0:-5][-1]] + bawah, [hasil[1][0:-5][-1]] + atas, alpha=0.2, label='Prediction Band')
plt.plot([date[-1]] + extend, [hasil[1][-5]] + avg_total, color='y', linewidth=3, label='Prediction', marker='x')
plt.plot(([date[-1]] + extend)[-15:][:8], hasil[1][-8:], color='grey', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
plt.grid()

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=20), interval='1d')
print(symbol)
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.01),
           fancybox=True, shadow=True, ncol=7)
plt.title(tick + " Date: " + str(hasil[2][-1]) + " to " + str(extend[-1]))

# Realized closes after the confirmation window, for comparison
new = yf.Ticker(tick)
new = new.history(interval="1d", start=([date[-1]] + extend)[-15:][:8][-1], end=datetime.today())
plt.plot(new.index.astype(str), new["Close"], color='red', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="True Trend")
print(new)
print(extend[-len(new):])
plt.xticks([])
plt.show()
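Condensed for reference, the per-day selection above amounts to the following sketch; the function name and parameters are illustrative, not part of the repo:

from collections import Counter

def select_best_samples(gap_avg, predictions, horizon=14, scored_days=7):
    # Day by day: pick the sample with the smallest confirmation gap;
    # past the scored window, reuse the sample chosen most often so far.
    good_index = []
    for day in range(horizon):
        if day < scored_days:
            gaps = [g[day] for g in gap_avg]
            good_index.append(gaps.index(min(gaps)))
        else:
            good_index.append(Counter(good_index).most_common(1)[0][0])
    return [predictions[good_index[day]][day] for day in range(horizon)]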
50	plot_loss.py	Normal file
@@ -0,0 +1,50 @@
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.animation as animation
from scipy.interpolate import make_interp_spline, BSpline
from datetime import date
import glob
import os

# Animate the most recent loss log written today
data_date = date.today()
loss_dir = "Loss/" + str(data_date) + "/"
list_of_files = glob.glob(loss_dir + "*")
latest_file = max(list_of_files, key=os.path.getctime)
print("Current File : ", latest_file)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
props = dict(boxstyle="square", facecolor='wheat', alpha=0.5)
data = np.load(latest_file)

def init():
    disp_data = round(0.3 * len(data))
    ax.set_ylim(0, np.max(data[-disp_data:]) * 1.5)
    ax.set_xlim(0, len(data))

def animate(i):
    ax.clear()
    data = np.load(latest_file)   # reload so the plot tracks a running training job
    x = np.arange(len(data))
    smooth_data = np.linspace(x.min(), x.max(), round(len(data) / 5))
    print(smooth_data)
    disp_data = round(0.2 * len(data))
    spl = make_interp_spline(x, data, k=3)   # cubic B-spline smoothing
    y_smooth = spl(smooth_data)
    # last_data = "loss = %.6f" % data[-1]
    ax.set_ylim(0, np.max(data[-disp_data:]) * 2)
    ax.set_xlim(0, len(data))
    # ax.text(0.05, 0.9, last_data, transform=ax.transAxes, bbox=props)
    ax.title.set_text("Loss of " + str(latest_file))
    ax.plot(smooth_data, y_smooth, color='tomato', alpha=0.8)

ani = animation.FuncAnimation(fig, animate, interval=20, init_func=init)
# ani.save(filename='loss_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'], dpi=300, bitrate=1800, metadata=dict(artist='Nino'))
plt.show()
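As a standalone check of the smoothing step, a minimal sketch on synthetic data (the values are illustrative, not from the repo):

import numpy as np
from scipy.interpolate import make_interp_spline

data = np.linspace(2.0, 0.1, 100) + 0.05 * np.random.randn(100)  # synthetic decaying "loss"
x = np.arange(len(data))
xs = np.linspace(x.min(), x.max(), len(data) // 5)
ys = make_interp_spline(x, data, k=3)(xs)  # cubic spline evaluated on a coarser grid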
139	plotterpitch.py	Normal file
@@ -0,0 +1,139 @@
import matplotlib.pyplot as plt
import json
import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np

# path='../Febrian/Bang Nino/Samples/NEE/2020-05-08/'

my_args = sys.argv[1:]
tick = my_args[0]
filter = int(my_args[1])   # acceptance window, in tenths of a price unit
visual = 100
print(tick)

# Most recent cross-check sample directory for this ticker
path = './Samples_Cross/Samples/' + tick + '/'
all_dirs = glob.glob(path + "*")
print(path)
print(all_dirs)
latest_dir = max(all_dirs, key=os.path.getctime)
print("Latest Data = ", latest_dir)
latest_dir = latest_dir + "/"
listsymbol = os.listdir(latest_dir)
print(listsymbol)

with open(latest_dir + listsymbol[0], 'r') as f:
    print(latest_dir + listsymbol[0])
    hasil = json.load(f)
print(len(hasil[0]))
print(len(hasil[1]))
print(len(hasil[2]))
akhir = datetime.strptime(str(hasil[2][-1]), '%Y-%m-%d').date()
print("Current Log : ", akhir)

# The next 14 weekdays after the last logged date
extend1 = [akhir + timedelta(days=x) for x in range(1, 50, 1)]
extend = []
for x in extend1:
    if x.weekday() not in [5, 6]:
        extend.append(x)
extend = extend[:14]
extend = [str(x) for x in extend]
print("Extended dates:", extend)

date = hasil[2]   # note: shadows datetime.date
plt.plot(hasil[1], color='grey', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
plt.plot(hasil[2], hasil[1][0:-5], color='b', linewidth=2)

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=30), interval='1d')
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

# Accept samples with more than 5 of 7 days inside the +/- filter/10 band
avg = []
for ex, x in enumerate(listsymbol):
    with open(latest_dir + x) as f:
        print("#################################################")
        print(x)
        hasil = json.load(f)
        a = hasil[0][-14:][:7]
        b = hasil[1][-7:]
        print("A & B Temp :")
        print(a)
        print(b)
        count = 0
        print("Predict - Real ")
        for k in range(7):   # renamed from x to avoid shadowing the filename
            print("%.2f" % (a[k] - b[k]))
            if (b[k] - (filter / 10) <= a[k] <= b[k] + (filter / 10)):
                count = count + 1
        if count > 5:
            print("ACCEPTED . . .")
            predict_val = hasil[0][-14:]
            avg.append(predict_val)
            print()
        else:
            print("NOT ACCEPTED . . .")
            print()

print("#################################################")
print(avg)
print(len(avg))
print("#################################################")

# Day-by-day mean of the accepted samples
avg_total = []
for x in range(len(extend)):
    temp = []
    for a in range(len(avg)):
        temp.append(avg[a][x])
    mean_pred = np.mean(temp)
    print(mean_pred)
    avg_total.append(mean_pred)

atas = [x + (1 / visual * x) for x in avg_total]
bawah = [x - (1 / visual * x) for x in avg_total]
print([hasil[1][0:-5][-1]] + bawah)
print([hasil[1][0:-5][-1]] + atas)
print([date[-1]] + extend)
plt.fill_between([date[-1]] + extend, [hasil[1][0:-5][-1]] + bawah, [hasil[1][0:-5][-1]] + atas, alpha=0.2, label='Prediction Band')
plt.plot([date[-1]] + extend, [hasil[1][-5]] + avg_total, color='y', linewidth=1, label='Prediction', marker='x')
plt.grid()

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=20), interval='1d')
print(symbol)
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.01),
           fancybox=True, shadow=True, ncol=7)
plt.title(tick + " Date: " + str(hasil[2][-1]) + " to " + str(extend[-1]))
plt.xticks([])
plt.show()
# plt.savefig("./temp_pred.png")
187	plotterpitch_django.py	Normal file
@@ -0,0 +1,187 @@
import matplotlib.pyplot as plt
import json
import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np
from collections import Counter

# path='../Febrian/Bang Nino/Samples/NEE/2020-05-08/'

def django_pass(func_stock, func_param):
    # Headless variant of plotterpitch_min.py: same filtering and
    # selection, but returns data instead of plotting (prints removed)
    tick = func_stock
    filter = func_param
    visual = 50

    # Most recently modified sample directory for this ticker
    path = '/home/kraken/Stock/v04/Samples/' + tick + '/'
    all_dirs = glob.glob(path + "*")
    latest_dir = max(all_dirs, key=os.path.getmtime)
    latest_dir = latest_dir + "/"
    listsymbol = os.listdir(latest_dir)

    with open(latest_dir + listsymbol[0], 'r') as f:
        hasil = json.load(f)
    akhir = datetime.strptime(str(hasil[2][-1]), '%Y-%m-%d').date()

    # The next 14 weekdays after the last logged date
    extend1 = [akhir + timedelta(days=x) for x in range(1, 50, 1)]
    extend = []
    for x in extend1:
        if x.weekday() not in [5, 6]:
            extend.append(x)
    extend = extend[:14]
    extend = [str(x) for x in extend]
    date = hasil[2]

    symbol = yf.Ticker(tick)
    symbol = symbol.history(start=akhir, end=akhir + timedelta(days=30), interval='1d')
    symbol = symbol.drop(symbol.index[0])
    symbol = symbol.drop(symbol.index[0])
    symbol = symbol['Close'][0:14].tolist()

    # Accept samples with at least 5 of 7 days inside the +/- filter/10 band
    avg = []
    gap_avg = []
    for ex, x in enumerate(listsymbol):
        gap_val = []
        with open(latest_dir + x) as f:
            hasil = json.load(f)
            a = hasil[0][-14:][:7]
            b = hasil[1][-7:]
            count = 0
            for k in range(len(b)):
                if (b[k] - (filter / 10) <= a[k] <= b[k] + (filter / 10)):
                    count = count + 1
            if count > 4:
                for i in range(len(b)):
                    gap_val.append(abs(a[i] - b[i]))
                predict_val = hasil[0][-14:]
                avg.append(predict_val)
                gap_avg.append(gap_val)

    # Day by day keep the sample with the smallest gap; past day 7,
    # reuse the sample chosen most often so far
    good_index = []
    avg_total = []
    for x in range(len(extend)):
        gap_temp = []
        if x < 7:
            for a in range(len(gap_avg)):
                gap_temp.append(gap_avg[a][x])
            good_index.append(gap_temp.index(min(gap_temp)))
        else:
            c = Counter(good_index)
            c = c.most_common(1)
            good_index.append(c[0][0])

    temp = []
    for c in range(len(extend)):
        temp.append(avg[int(good_index[c])][c])
    avg_total = temp

    # Band bounds (plotting is commented out in this headless variant)
    atas = [x + (1 / visual * x) for x in avg_total]
    bawah = [x - (1 / visual * x) for x in avg_total]

    symbol = yf.Ticker(tick)
    symbol = symbol.history(start=akhir, end=akhir + timedelta(days=20), interval='1d')
    symbol = symbol.drop(symbol.index[0])
    symbol = symbol.drop(symbol.index[0])
    symbol = symbol['Close'][0:14].tolist()

    ret_data = []
    ret_date = []
    ret_date.append(hasil[2] + extend)
    ret_data.append(avg_total)   # stitched prediction
    ret_data.append(hasil[1])    # confirmation closes
    return ret_data, ret_date
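A minimal caller sketch, assuming samples already exist for the ticker under the hard-coded Samples path above (the ticker and filter value are illustrative):

from plotterpitch_django import django_pass

data, dates = django_pass("NEE", 900)   # data[0]: stitched prediction, data[1]: confirmation closes
print(len(dates[0]), len(data[0]))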
172	plotterpitch_min.py	Normal file
@@ -0,0 +1,172 @@
import matplotlib.pyplot as plt
import json
import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np
from collections import Counter

# path='../Febrian/Bang Nino/Samples/NEE/2020-05-08/'

my_args = sys.argv[1:]
tick = my_args[0]
filter = int(my_args[1])   # acceptance window, in tenths of a price unit
visual = 50
print(tick)

# Most recent sample directory for this ticker
path = './Samples/' + tick + '/'
all_dirs = glob.glob(path + "*")
print(path)
print(all_dirs)
latest_dir = max(all_dirs, key=os.path.getctime)
print("Latest Data = ", latest_dir)
latest_dir = latest_dir + "/"
listsymbol = os.listdir(latest_dir)
print(listsymbol)

with open(latest_dir + listsymbol[0], 'r') as f:
    print(latest_dir + listsymbol[0])
    hasil = json.load(f)
print(len(hasil[0]))
print(len(hasil[1]))
print(len(hasil[2]))
akhir = datetime.strptime(str(hasil[2][-1]), '%Y-%m-%d').date()
print("Current Log : ", akhir)

# The next 14 weekdays after the last logged date
extend1 = [akhir + timedelta(days=x) for x in range(1, 50, 1)]
extend = []
for x in extend1:
    if x.weekday() not in [5, 6]:
        extend.append(x)
extend = extend[:14]
extend = [str(x) for x in extend]
print("Extended dates:", extend)

date = hasil[2]   # note: shadows datetime.date
plt.plot(hasil[1], color='grey', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
plt.plot(hasil[2], hasil[1][0:-7], color='b', linewidth=2)

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=30), interval='1d')
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

# Accept samples with at least 5 of 7 days inside the +/- filter/10 band
avg = []
gap_avg = []
for ex, x in enumerate(listsymbol):
    gap_val = []
    with open(latest_dir + x) as f:
        print("#################################################")
        print(x)
        hasil = json.load(f)
        a = hasil[0][-14:][:7]
        b = hasil[1][-7:]
        print("A & B Temp :")
        print(a)
        print(b)
        count = 0
        print("Predict - Real ")
        for k in range(len(b)):
            print("%.2f" % (a[k] - b[k]))
            if (b[k] - (filter / 10) <= a[k] <= b[k] + (filter / 10)):
                count = count + 1
        if count > 4:
            print("ACCEPTED . . .")
            for i in range(len(b)):
                gap_val.append(abs(a[i] - b[i]))
            print(len(gap_val))
            predict_val = hasil[0][-14:]
            avg.append(predict_val)
            gap_avg.append(gap_val)
            print()
        else:
            print("NOT ACCEPTED . . .")
            print()

print("#################################################")
print(avg)
print(len(avg))
print(gap_avg)
print(len(gap_avg))
print("#################################################")

# Day by day keep the sample with the smallest gap; past day 7,
# reuse the sample chosen most often so far
avg_total = []
good_index = []
for x in range(len(extend)):
    print("NOW ", x)
    gap_temp = []
    if x < 7:
        for a in range(len(gap_avg)):
            gap_temp.append(gap_avg[a][x])
        print("Gap Temp")
        print(gap_temp)
        good_index.append(gap_temp.index(min(gap_temp)))
    else:
        print("LOL")
        c = Counter(good_index)
        c = c.most_common(1)
        good_index.append(c[0][0])

print("Good Index")
print(len(good_index))
print(good_index)
print()
temp = []
for c in range(len(extend)):
    print(c)
    temp.append(avg[int(good_index[c])][c])
    print(temp)
avg_total = temp
print(avg_total)

atas = [x + (1 / visual * x) for x in avg_total]
bawah = [x - (1 / visual * x) for x in avg_total]
print([hasil[1][0:-5][-1]] + bawah)
print([hasil[1][0:-5][-1]] + atas)
print([date[-1]] + extend)
plt.fill_between([date[-1]] + extend, [hasil[1][0:-5][-1]] + bawah, [hasil[1][0:-5][-1]] + atas, alpha=0.2, label='Prediction Band')
plt.plot([date[-1]] + extend, [hasil[1][-5]] + avg_total, color='y', linewidth=3, label='Prediction', marker='x')
plt.grid()

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=20), interval='1d')
print(symbol)
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

plt.plot(extend[0:len(symbol)], symbol, color='g', label='Actual', linewidth=1)

plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.01),
           fancybox=True, shadow=True, ncol=7)
plt.title(tick + " Date: " + str(hasil[2][-1]) + " to " + str(extend[-1]))
plt.xticks([])
plt.show()
173	plotterpitch_min_backup.py	Normal file
@@ -0,0 +1,173 @@
import matplotlib.pyplot as plt
import json
import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np
from collections import Counter

# path='../Febrian/Bang Nino/Samples/NEE/2020-05-08/'

my_args = sys.argv[1:]
tick = my_args[0]
filter = int(my_args[1])   # acceptance window, in tenths of a price unit
visual = 50
print(tick)

# Most recent sample directory for this ticker
path = './Samples/' + tick + '/'
all_dirs = glob.glob(path + "*")
print(path)
print(all_dirs)
latest_dir = max(all_dirs, key=os.path.getctime)
print("Latest Data = ", latest_dir)
latest_dir = latest_dir + "/"
listsymbol = os.listdir(latest_dir)
print(listsymbol)

with open(latest_dir + listsymbol[0], 'r') as f:
    print(latest_dir + listsymbol[0])
    hasil = json.load(f)
print(len(hasil[0]))
print(len(hasil[1]))
print(len(hasil[2]))
akhir = datetime.strptime(str(hasil[2][-1]), '%Y-%m-%d').date()
print("Current Log : ", akhir)

# The next 14 weekdays after the last logged date
extend1 = [akhir + timedelta(days=x) for x in range(1, 50, 1)]
extend = []
for x in extend1:
    if x.weekday() not in [5, 6]:
        extend.append(x)
extend = extend[:14]
extend = [str(x) for x in extend]
print("Extended dates:", extend)

date = hasil[2]   # note: shadows datetime.date
# plt.plot(([date[-1]]+extend)[-7:], hasil[1][-7:], color='grey', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
plt.plot(hasil[2][-10:], hasil[1][0:-7][-10:], color='b', linewidth=2)

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=30), interval='1d')
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

# Accept samples with at least 5 of 7 days inside the +/- filter/10 band
avg = []
gap_avg = []
for ex, x in enumerate(listsymbol):
    gap_val = []
    with open(latest_dir + x) as f:
        print("#################################################")
        print(x)
        hasil = json.load(f)
        a = hasil[0][-14:][:7]
        b = hasil[1][-7:]
        print("A & B Temp :")
        print(a)
        print(b)
        count = 0
        print("Predict - Real ")
        for k in range(len(b)):
            print("%.2f" % (a[k] - b[k]))
            if (b[k] - (filter / 10) <= a[k] <= b[k] + (filter / 10)):
                count = count + 1
        if count > 4:
            print("ACCEPTED . . .")
            for i in range(len(b)):
                gap_val.append(abs(a[i] - b[i]))
            print(len(gap_val))
            predict_val = hasil[0][-14:]
            avg.append(predict_val)
            plt.plot((date + extend)[-20:], hasil[0][-20:], label='Sample %s' % ex, alpha=0.3)
            gap_avg.append(gap_val)
            print()
        else:
            print("NOT ACCEPTED . . .")
            print()

print("#################################################")
print(avg)
print(len(avg))
print(gap_avg)
print(len(gap_avg))
print("#################################################")

# Day by day keep the sample with the smallest gap; past day 7,
# reuse the sample chosen most often so far
avg_total = []
good_index = []
for x in range(len(extend)):
    print("NOW ", x)
    gap_temp = []
    if x < 7:
        for a in range(len(gap_avg)):
            gap_temp.append(gap_avg[a][x])
        print("Gap Temp")
        print(gap_temp)
        good_index.append(gap_temp.index(min(gap_temp)))
    else:
        print("LOL")
        c = Counter(good_index)
        c = c.most_common(1)
        good_index.append(c[0][0])

print("Good Index")
print(len(good_index))
print(good_index)
print()
temp = []
for c in range(len(extend)):
    print(c)
    temp.append(avg[int(good_index[c])][c])
    print(temp)
avg_total = temp
print(avg_total)

atas = [x + (1 / visual * x) for x in avg_total]
bawah = [x - (1 / visual * x) for x in avg_total]
print([hasil[1][0:-5][-1]] + bawah)
print([hasil[1][0:-5][-1]] + atas)
print([date[-1]] + extend)
plt.fill_between([date[-1]] + extend, [hasil[1][0:-5][-1]] + bawah, [hasil[1][0:-5][-1]] + atas, alpha=0.2, label='Prediction Band')
plt.plot([date[-1]] + extend, [hasil[1][-5]] + avg_total, color='y', linewidth=3, label='Prediction', marker='x')
plt.plot(([date[-1]] + extend)[-15:][:8], hasil[1][-8:], color='grey', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
plt.grid()

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=20), interval='1d')
print(symbol)
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()

plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.01),
           fancybox=True, shadow=True, ncol=7)
plt.title(tick + " Date: " + str(hasil[2][-1]) + " to " + str(extend[-1]))
plt.xticks([])
plt.show()
624	predictor.py	Normal file
@@ -0,0 +1,624 @@
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
from datetime import timedelta
from tqdm import tqdm
sns.set()
import json
tf.compat.v1.random.set_random_seed(1234)
from matplotlib import style
style.use('ggplot')
import math
import yfinance as yf
import time
from datetime import date, timedelta
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"   # run on CPU only
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import cProfile
import pstats


# In[34]:

symbols = os.listdir()
tf.compat.v1.disable_eager_execution()

# INITIAL VARS
test_size = 14        # days to predict
simulation_size = 1

# MODEL VARS
num_layers = 2
size_layer = 128
timestamp = 7         # training window length
epoch = 20
dropout_rate = 0.8
prediction_gap = sys.argv[2]
future_day = test_size
learning_rate = 0.01
graph_loss = []


# In[35]:
# Necessary Dirs
# def date_manage(date1,date2=None):
#     if date2 is None:
#         date2=date1+timedelta(days=365)
#     date_col=[]
#     for n in range(int((date2 - date1).days)+1):
#         date_col.append(date1 + timedelta(n))
#     weekdays = [5,6]
#     date_result=[]
#     for dt in date_col:
#         if dt.weekday() not in weekdays:
#             dt.strftime("%Y-%m-%d")
#     return date_result


# In[36]:
def loss_animate(ax, i):
    # note: total_loss is a global populated during training
    json_loss = pd.DataFrame(total_loss)
    ax.plot(i)
    return ax


def loader(symbol, test_size, date):
    # Three years of daily bars up to the requested date
    df = yf.Ticker(symbol)
    df = df.history(start=date - timedelta(days=365 * 3), end=date, interval="1d")
    df = df.reset_index(level=0)
    df = df.drop(columns=['Stock Splits'], axis=1)

    # Volatility bands: EWM of High/Low +/- 2 rolling standard deviations
    df['Up'] = df['High'].ewm(span=6, adjust=False).mean() + 2 * df['High'].rolling(window=6).std()
    df['Down'] = df['Low'].ewm(span=8, adjust=False).mean() - 2 * df['Low'].rolling(window=8).std()

    df = df.dropna()
    df = df.drop(df.tail(5).index)

    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    date_ori = pd.Series(date_ori).dt.strftime(date_format='%Y-%m-%d').tolist()
    print(len(df), len(date_ori))
    return df, date_ori


def trueloader(symbol, test_size, date):
    # Confirmation series: same range, first 7 rows dropped
    df2 = yf.Ticker(symbol)
    df2 = df2.history(start=date - timedelta(days=365 * 3), end=date, interval="1d")
    df2 = df2.reset_index(level=0)
    df2 = df2.drop(columns=['Dividends'], axis=1)
    df2 = df2.drop(columns=['Stock Splits'], axis=1)
    df2 = df2.drop(df2.head(7).index)
    return df2


# In[38]:

def preproc(df):
    # Scale columns 1:9 (Close, Volume, and the added bands) into [0, 1]
    minmax = MinMaxScaler().fit(df.iloc[:, 1:9].astype('float32'))
    df_log = minmax.transform(df.iloc[:, 1:9].astype('float32'))
    df_log = pd.DataFrame(df_log)
    return df_log, minmax


# In[39]:
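The scaler returned by preproc is reused later to undo the normalization after prediction. A minimal round-trip sketch with synthetic numbers (illustrative only, not repo data):

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

raw = pd.DataFrame({'Close': [10.0, 12.0, 11.0, 15.0]})
minmax = MinMaxScaler().fit(raw.astype('float32'))
scaled = minmax.transform(raw.astype('float32'))      # values in [0, 1]
restored = minmax.inverse_transform(scaled)           # back to price scale
print(np.allclose(raw.values, restored, atol=1e-5))   # True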
class Model:
    def __init__(
        self,
        learning_rate,
        num_layers,
        size,
        size_layer,
        output_size,
        forget_bias=0.1,
    ):
        def lstm_cell(size_layer):
            return tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple=False)

        # Stacked LSTM with dropout, fed through a dense output layer
        rnn_cells = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(size_layer) for _ in range(num_layers)],
            state_is_tuple=False,
        )
        self.X = tf.compat.v1.placeholder(tf.float32, (None, None, size))
        self.Y = tf.compat.v1.placeholder(tf.float32, (None, output_size))
        drop = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
            rnn_cells, output_keep_prob=forget_bias
        )
        self.hidden_layer = tf.compat.v1.placeholder(
            tf.float32, (None, num_layers * 2 * size_layer)
        )
        self.outputs, self.last_state = tf.compat.v1.nn.dynamic_rnn(
            drop, self.X, initial_state=self.hidden_layer, dtype=tf.float32
        )
        self.logits = tf.compat.v1.layers.dense(self.outputs[-1], output_size)
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
            self.cost
        )


def calculate_accuracy(real, predict):
    # 100 * (1 - relative RMSE), shifted by +1 to avoid division by zero
    real = np.array(real) + 1
    predict = np.array(predict) + 1
    percentage = 1 - np.sqrt(np.mean(np.square((real - predict) / real)))
    return percentage * 100


def anchor(signal, weight):
    # Exponential smoothing: each point blends the running value with the new one
    buffer = []
    last = signal[0]
    for i in signal:
        smoothed_val = last * weight + (1 - weight) * i
        buffer.append(smoothed_val)
        last = smoothed_val
    return buffer


# In[40]:
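To make the smoothing concrete, a tiny worked check of anchor() above (values are illustrative): with weight = 0.3 each output keeps 30% of the previous smoothed value and takes 70% of the new point, so 20 becomes 10*0.3 + 20*0.7 = 17.

print(anchor([10, 20, 30], 0.3))   # -> approximately [10.0, 17.0, 26.1]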
def main_train(df_beta, df_train, df, minmax):
    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64, inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    pbar = tqdm(range(10), desc='Main train loop')  # default 500 range
    for i in pbar:
        init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        print("Debugging : ")
        for k in range(0, df_train.shape[0] - 1, timestamp):
            # Slide a timestamp-wide window; targets are the window shifted by one
            index = min(k + timestamp, df_train.shape[0] - 1)
            print(index)
            batch_x = np.expand_dims(
                df_train.iloc[k:index, :].values, axis=0
            )
            batch_y = df_train.iloc[k + 1:index + 1, :].values
            logits, last_state, _, loss = sess.run(
                [modelnn.logits, modelnn.last_state, modelnn.optimizer, modelnn.cost],
                feed_dict={
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            init_value = last_state
            total_loss.append(loss)
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        graph_loss.append(np.mean(total_loss))
        # np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost=np.mean(total_loss), min_acc=np.min(total_acc), mean_acc=np.mean(total_acc))


def forecast(df_beta, df_train, df, minmax):
    tf.compat.v1.reset_default_graph()
    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64, inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    pbar = tqdm(range(epoch), desc='train loop')
    for i in pbar:
        total_loss, total_acc = [], []
        print("Debugging : ")
        for k in range(0, df_train.shape[0] - 1, timestamp):
            init_value = np.zeros((1, num_layers * 2 * size_layer))  # note: state reset every window here
            index = min(k + timestamp, df_train.shape[0] - 1)
            batch_x = np.expand_dims(
                df_train.iloc[k:index, :].values, axis=0
            )
            batch_y = df_train.iloc[k + 1:index + 1, :].values
            print("BATCH_X:", batch_x)
            print("BATCH_Y:", batch_y)
            logits, last_state, _, loss = sess.run(
                [modelnn.logits, modelnn.last_state, modelnn.optimizer, modelnn.cost],
                feed_dict={
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            init_value = last_state
            total_loss.append(loss)
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        graph_loss.append(np.mean(total_loss))
        pbar.set_postfix(cost=np.mean(total_loss), min_acc=np.min(total_acc), mean_acc=np.mean(total_acc))

    future_day = test_size

    # Replay the training range, then roll the last window forward day by day
    output_predict = np.zeros((df_train.shape[0] + future_day, df_train.shape[1]))
    output_predict[0] = df_train.iloc[0]
    upper_b = (df_train.shape[0] // timestamp) * timestamp
    init_value = np.zeros((1, num_layers * 2 * size_layer))

    for k in range(0, (df_train.shape[0] // timestamp) * timestamp, timestamp):
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict={
                modelnn.X: np.expand_dims(
                    df_train.iloc[k:k + timestamp], axis=0
                ),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[k + 1:k + timestamp + 1] = out_logits

    if upper_b != df_train.shape[0]:
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict={
                modelnn.X: np.expand_dims(df_train.iloc[upper_b:], axis=0),
                modelnn.hidden_layer: init_value,
            },
        )
        output_predict[upper_b + 1:df_train.shape[0] + 1] = out_logits
        future_day -= 1
        date_ori.append(date_ori[-1] + timedelta(days=1))
        init_value = last_state

    # Recursive multi-step prediction: feed each new output back in
    for i in range(future_day):
        o = output_predict[-future_day - timestamp + i:-future_day + i]
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict={
                modelnn.X: np.expand_dims(o, axis=0),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[-future_day + i] = out_logits[-1]
        date_ori.append(date_ori[-1] + timedelta(days=1))

    output_predict = minmax.inverse_transform(output_predict)
    deep_future = anchor(output_predict[:, 0], 0.3)
    sess.close()
    sess.__del__()
    return deep_future


# In[41]:
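The windowing used by both training loops is easy to check in isolation. A sketch with a toy frame (illustrative values) showing how each (batch_x, batch_y) pair is the same window shifted by one step:

import numpy as np
import pandas as pd

timestamp = 7
df_train = pd.DataFrame(np.arange(20.0))
for k in range(0, df_train.shape[0] - 1, timestamp):
    index = min(k + timestamp, df_train.shape[0] - 1)
    batch_x = df_train.iloc[k:index].values          # inputs t .. t+6
    batch_y = df_train.iloc[k + 1:index + 1].values  # targets t+1 .. t+7
    print(k, index, batch_x.ravel()[:3], batch_y.ravel()[:3])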
def newaccuration(accepted_results,truetrend):
|
||||||
|
hasilutama=0
|
||||||
|
indexbagus=0
|
||||||
|
truest=0
|
||||||
|
predictest=0
|
||||||
|
for i,x in enumerate(accepted_results):
|
||||||
|
a=x[-(test_size+2):]
|
||||||
|
|
||||||
|
#a=x[:((test_size+2)/2)]
|
||||||
|
print("a",a)
|
||||||
|
b=truetrend[-(test_size+1):]
|
||||||
|
#print("b",b)
|
||||||
|
hasil=0
|
||||||
|
true=[]
|
||||||
|
predict=[]
|
||||||
|
for xy in range(1,len((a))):
|
||||||
|
if a[xy]<a[xy-1]:
|
||||||
|
predict.append("Down")
|
||||||
|
else:
|
||||||
|
predict.append("Up")
|
||||||
|
if b[xy]<b[xy-1]:
|
||||||
|
|
||||||
|
true.append("Down")
|
||||||
|
else:
|
||||||
|
true.append("Up")
|
||||||
|
|
||||||
|
print(true)
|
||||||
|
print(predict)
|
||||||
|
for xz in range(len(true)):
|
||||||
|
if true[xz]==predict[xz]:
|
||||||
|
hasil=hasil+1
|
||||||
|
if hasil > hasilutama:
|
||||||
|
hasilutama=hasil
|
||||||
|
indexbagus=i
|
||||||
|
truest=true
|
||||||
|
predictest=predict
|
||||||
|
salah=[]
|
||||||
|
for xz in range(len(truest)):
|
||||||
|
if truest[xz]!=predictest[xz]:
|
||||||
|
salah.append(xz)
|
||||||
|
# if xz!=0:
|
||||||
|
# salah.append(xz-1)
|
||||||
|
# print("INI:",b)
|
||||||
|
print("TRUEST",truest)
|
||||||
|
print("predictest",predictest)
|
||||||
|
return hasilutama,indexbagus,salah
|
||||||
|
|
||||||
|
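# [annotation, not in the original source] newaccuration scores each candidate
# forecast by how many daily Up/Down moves it matches against the true trend
# and keeps the best sample (Indonesian identifiers: hasil(utama) = (best)
# result, indexbagus = best index, salah = list of day indices the winner
# still got wrong). Note that `a` is sliced to test_size + 2 points while `b`
# gets test_size + 1, so the loop's final b[xy] lookup can read one element
# past the end when both inputs have exactly those lengths.
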
# In[42]:


def betaforecast(simulationsize, dfx, dftrain, df, df2, minmax):
    results = []
    for i in range(simulationsize):
        forecast_res = forecast(df, dftrain, dfx, minmax)
        results.append(forecast_res)
    accepted_results = []

    # Redo the forecast while any predicted value falls below the historical
    # minimum close or above twice the historical maximum.
    while not ((np.array(results[0][-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(results[0][-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0):
        print("++++++++++++++++++++++++")
        print("Forecast Recalled...")
        results[0] = forecast(df, dftrain, dfx, minmax)
    return results[0]

# In[43]:


def interval(p1, p2):
    return abs(p1 - p2)


# In[44]:


def checkaccuracy2(true):
    avg = []
    for x in range(len(true) - 7):
        avg.append(interval(true[x], true[x + 1]))
    average = sum(avg) / len(avg)
    return average


# In[45]:


def checkaccuracy(predict, true, filterx, test_size):
    print("True Length: ", len(true))
    print("Predict Length: ", len(predict))

    # avg = []
    # for x in range(len(true) - 5):
    #     avg.append(interval(true[x], predict[x]))
    # average = sum(avg) / len(avg)
    # print("AVG1:", average)
    # print("AVG2:", threshold)

    temp_predict = predict[-test_size:]
    temp_true = true[-test_size:]

    # avg2 = interval(max(predict), min(predict))
    count = 0
    print("------------------------------------")
    for x in range(test_size):
        # acc_var1 = temp_true[x] - (1 / filterx * temp_true[x])
        acc_var1 = temp_true[x] - (filterx / 10)
        acc_var2 = temp_predict[x]
        # acc_var3 = temp_true[x] + (1 / filterx * temp_true[x])
        acc_var3 = temp_true[x] + (filterx / 10)
        acc_condition = acc_var1 <= acc_var2 <= acc_var3
        # print("Var 1 : ", acc_var1)
        # print("Var 2 : ", acc_var2)
        # print("Var 3 : ", acc_var3)
        # print("Day " + str(x + 1) + " " + str(int(acc_var1)) + " " + str(int(acc_var2)) + " " + str(int(acc_var3)) + " : ", acc_condition)
        print("Day " + str(x + 1) + ", Price : " + str(int(temp_true[x])) + ", Gap = " + str(int(abs(temp_predict[x] - temp_true[x]))) + " : ", acc_condition)
        if acc_condition:
            count = count + 1
    print("------------------------------------")
    if count > 7:
        print("Result True")
        return True
    else:
        print("Result False")
        return False

    # if average > threshold:
    #     return False
    # else:
    #     return True

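# [annotation, not in the original source] A forecast "passes" when, on more
# than 7 of the last test_size (14) days, the predicted close lies inside an
# absolute band of +/- filterx/10 around the true close. Example: with
# filterx = 50 the band is +/- 5.0, so a true close of 120.0 accepts any
# prediction between 115.0 and 125.0 for that day.
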
# In[46]:


def findthreshold(simulationsize, dfx, dftrain, df, df2, minmax):
    results = []
    for i in range(simulationsize):
        results.append(forecast(df, dftrain, dfx, minmax))
    accepted_results = []

    for r in results:
        if (np.array(r[-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(r[-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0:
            accepted_results.append(r)

    finalavg = 999999
    for o in accepted_results:
        avg = []
        for x in range(len(o) - 5):
            avg.append(interval(o[x], df2[x]))
        average = sum(avg) / len(avg)
        if average <= finalavg:
            finalavg = average

    return finalavg

def temp_data(date, xi, resultfinal, df2, date_col, x):
    print("Called . . . ")
    if os.path.isdir("TempData/") == False:
        os.mkdir("TempData/")
    if os.path.isdir("TempData/%s" % x) == False:
        os.mkdir("TempData/%s" % x)
    if os.path.isdir("TempData/%s/" % x + str(date)) == False:
        os.mkdir("TempData/%s/" % x + str(date))
    with open("TempData/%s/" % x + str(date) + "/" + x + str(xi) + ".vezpal2", "w+") as oop:
        main = []
        main.append(resultfinal)  # prediction
        main.append(list(df2['Close']))
        main.append(date_col)
        # main.append(3)
        # main.append([0])
        json.dump(main, oop)

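# [annotation, not in the original source] Each .vezpal2 file is plain JSON:
# a three-element list of [forecast values, true closing prices, date strings].
# temp_plotter.py and the screener scripts load it back with json.load().
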
def automaton(simulationsize, date):

    # symbols = ["AAPL"]
    symbols = sys.argv[1]
    times = []
    x = symbols
    # for x in symbols:
    temp_time = []
    temp_time.append(x)
    counter = 0
    validity = 0
    df, date_col = loader(x, test_size, date)
    # print(type(df))
    dfx = df
    df2 = trueloader(x, test_size, date)
    df, minmax = preproc(df)
    dftrain = df
    wrong = [1, 2, 3, 4, 5]
    # avg = checkaccuracy2(list(df2["Close"]))
    # start = time.time()
    # avg = findthreshold(50, dfx, dftrain, df, list(df2["Close"]), minmax)
    # temp_time.append(time.time() - start)
    start = time.time()
    filterx = int(prediction_gap)
    able = False
    print("============== || Initial Train || =============")
    main_train(df, dftrain, dfx, minmax)
    for xi in range(5):
        decision = False
        while decision == False:
            print()
            print("====== [ Forecasting Attempt : " + str(counter + 1) + " ] ===========")
            print("====== [ Progress : " + str(xi) + "/5 ] ")
            resultfinal = betaforecast(simulationsize, dfx, dftrain, df, df2, minmax)
            # validity = valid
            decision = checkaccuracy(resultfinal, list(df2["Close"]), filterx, test_size)
            # wrong = invalid
            if decision == True:
                able = True
                print("ABLE")
                print(str(filterx))
            if counter > 10 and decision != True:
                counter = 0
                filterx = filterx + 10
                print("Filter X new value : " + str(filterx))
            print("Decision Status : ", decision)
            print("**************************************")
            # avg = avg + (1 / 3 * avg)
            if filterx > 1000:
                print("====== [ GG, we gave up ] =====")
                break  # stop retrying this sample once the tolerance has grown past 1000
            counter = counter + 1
        temp_data(date, xi, resultfinal, df2, date_col, x)
        print("[ Loop : " + x + " done ] =========================")
        print()

    if os.path.isdir("Backtest/") == False:
        os.mkdir("Backtest/")
    if os.path.isdir("Backtest/%s" % x) == False:
        os.mkdir("Backtest/%s" % x)
    if os.path.isdir("Backtest/%s/" % x + str(date)) == False:
        os.mkdir("Backtest/%s/" % x + str(date))
    with open("Backtest/%s/" % x + str(date) + "/" + x + str(xi) + ".vezpal2", "w+") as oop:
        main = []
        main.append(resultfinal)  # prediction
        main.append(list(df2['Close']))
        main.append(date_col)
        # main.append(3)
        # main.append([0])
        json.dump(main, oop)
    print("Time for %s :" % x, time.time() - start)
    temp_time.append(time.time() - start)
    times.append(temp_time)
    return times

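# [annotation, not in the original source] automaton widens its acceptance
# band adaptively: filterx starts at prediction_gap (a CLI argument) and,
# after more than 10 failed forecasting attempts, grows by 10 while the
# attempt counter resets, so a stock that is hard to fit is judged against
# an ever looser +/- filterx/10 tolerance until a sample passes.
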
def predictor(simulationsize, current):
    for x in range(52):
        tf.compat.v1.reset_default_graph()
        current += timedelta(days=7)
        automaton(simulationsize, current)

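# [annotation, not in the original source] predictor() is a weekly
# walk-forward backtest driver: starting from current_date it advances the
# anchor date by 7 days, 52 times, resetting the TF graph and rerunning the
# whole train/forecast/filter pipeline for each week.
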
current_date = date(2020, 1, 1)
if os.path.isdir("Loss/") == False:
    os.mkdir("Loss/")
if os.path.isdir("Loss/" + str(current_date)) == False:
    os.mkdir("Loss/" + str(current_date))

# loss_file = time.strftime("%Y%m%d-%H%M%S")
# loss_file = "Loss/" + str(date.today()) + "/" + loss_file
global_start = time.time()
# profile = cProfile.Profile()

# main_func = "predictor(simulation_size,current_date)"
predictor(simulation_size, current_date)
# ps = pstats.Stats(profile.run(main_func))
print("Overall time consumption ", str(time.time() - global_start))
# ps.dump_stats("./Cprofile_model_01.ps")
624
predictor_backup.py
Normal file
624
predictor_backup.py
Normal file
@ -0,0 +1,624 @@
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
from datetime import timedelta
from tqdm import tqdm
sns.set()
import json
tf.compat.v1.random.set_random_seed(1234)
from matplotlib import style
# import matplotlib.backends.backend_qt5agg
# %matplotlib qt
style.use('ggplot')
import math
import yfinance as yf
import time
from datetime import date, timedelta
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import cProfile
import pstats


# In[34]:


symbols = os.listdir()
symbols
tf.compat.v1.disable_eager_execution()
# INITIAL VARS
test_size = 14
simulation_size = 1

# MODEL VARS
num_layers = 2
size_layer = 128
timestamp = 7
epoch = 20
dropout_rate = 0.8
prediction_gap = sys.argv[2]
future_day = test_size
learning_rate = 0.01
graph_loss = []


# In[35]:
# Necessary Dirs
# def date_manage(date1, date2=None):
#     if date2 is None:
#         date2 = date1 + timedelta(days=365)
#     date_col = []
#     for n in range(int((date2 - date1).days) + 1):
#         date_col.append(date1 + timedelta(n))
#     weekdays = [5, 6]
#     date_result = []
#     for dt in date_col:
#         if dt.weekday() not in weekdays:
#             dt.strftime("%Y-%m-%d")
#     return date_result


# In[36]:
def loss_animate(ax, i):
    json_loss = pd.DataFrame(total_loss)
    ax.plot(i)
    return ax


def loader(symbol, test_size, date):
    # dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m')
    # df = pd.read_csv('../dataset/IBMCUT.csv', parse_dates=['Date'], index_col='Date', date_parser=dateparse)

    df = yf.Ticker(symbol)
    # df = df.history(period="1y", interval="1d")
    # df = df.history(start=date - timedelta(days=365), end=date, interval="1d")
    df = df.history(start=date - timedelta(days=365 * 3), end=date, interval="1d")
    df = df.reset_index(level=0)

    # df = df.drop(columns=['Dividends'], axis=1)
    df = df.drop(columns=['Stock Splits'], axis=1)

    df['Up'] = df['High'].ewm(span=6, adjust=False).mean() + 2 * df['High'].rolling(window=6).std()
    df['Down'] = df['Low'].ewm(span=8, adjust=False).mean() - 2 * df['Low'].rolling(window=8).std()

    df = df.dropna()
    df = df.drop(df.tail(5).index)

    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    # for i in range(test_size):
    #     # date_ori.append(date_ori[-1] + timedelta(days=1))
    #     add = 1
    #     while ((date_ori[-1]) + timedelta(days=add)).weekday() in [5, 6]:
    #         add = add + 1
    #     date_ori.append(date_ori[-1] + timedelta(days=add))
    date_ori = pd.Series(date_ori).dt.strftime(date_format='%Y-%m-%d').tolist()
    print(len(df), len(date_ori))
    return df, date_ori


def trueloader(symbol, test_size, date):
    # df2 = pd.read_csv(symbol)
    # print("LENDF2:", len(df2))

    df2 = yf.Ticker(symbol)
    # df2 = df2.history(start=date - timedelta(days=365), end=date, interval="1d")
    df2 = df2.history(start=date - timedelta(days=365 * 3), end=date, interval="1d")
    df2 = df2.reset_index(level=0)
    df2 = df2.drop(columns=['Dividends'], axis=1)
    df2 = df2.drop(columns=['Stock Splits'], axis=1)
    df2 = df2.drop(df2.head(7).index)
    # df2 = df2.drop(df2.tail(test_size).index)

    return df2

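# [annotation, not in the original source] loader() augments the raw OHLCV
# frame with two volatility-band features, Up = EWM(High, span 6) + 2 *
# rolling std and Down = EWM(Low, span 8) - 2 * rolling std, then drops the
# rolling-window warm-up NaN rows and the last five sessions before scaling.
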
# In[38]:


def preproc(df):
    minmax = MinMaxScaler().fit(df.iloc[:, 1:9].astype('float32'))  # Close, Volume, and all
    df_log = minmax.transform(df.iloc[:, 1:9].astype('float32'))  # Close, Volume, and all
    df_log = pd.DataFrame(df_log)
    df_log.head()
    return df_log, minmax


# In[39]:


class Model:
    def __init__(
        self,
        learning_rate,
        num_layers,
        size,
        size_layer,
        output_size,
        forget_bias = 0.1,
    ):
        def lstm_cell(size_layer):
            # print(len(tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False)))
            return tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False)

        rnn_cells = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(size_layer) for _ in range(num_layers)],
            state_is_tuple = False,
        )
        self.X = tf.compat.v1.placeholder(tf.float32, (None, None, size))
        self.Y = tf.compat.v1.placeholder(tf.float32, (None, output_size))
        # self.X = tf.keras.Input((None, size), dtype=tf.float32)
        # self.Y = tf.keras.Input((output_size), dtype=tf.float32)
        drop = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
            rnn_cells, output_keep_prob = forget_bias
        )
        # print("XXXX:", self.X)
        # print("XXXX:", self.X.shape)
        # print("XXXX:", self.Y)
        # print("XXXX:", self.Y.shape)
        self.hidden_layer = tf.compat.v1.placeholder(
            tf.float32, (None, num_layers * 2 * size_layer)
        )
        self.outputs, self.last_state = tf.compat.v1.nn.dynamic_rnn(
            drop, self.X, initial_state = self.hidden_layer, dtype = tf.float32
        )
        # print(self.outputs)
        self.logits = tf.compat.v1.layers.dense(self.outputs[-1], output_size)
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
            self.cost
        )
        # print("cost:", self.cost)
        # print("cost:", self.optimizer)

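# [annotation, not in the original source] With state_is_tuple=False each
# LSTM layer packs its cell and hidden vectors into one tensor, so the
# flattened recurrent state fed through `hidden_layer` holds
# num_layers * 2 * size_layer (= 2 * 2 * 128 = 512) values per batch element;
# the same shape is used for the zero initial states in main_train() and
# forecast().
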
def calculate_accuracy(real, predict):
    real = np.array(real) + 1
    predict = np.array(predict) + 1
    percentage = 1 - np.sqrt(np.mean(np.square((real - predict) / real)))
    return percentage * 100

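# [annotation, not in the original source] This is 100 * (1 - RMSPE), i.e.
# one minus the root-mean-square percentage error; the +1 shift keeps the
# ratio finite when a scaled value is exactly zero.
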
def anchor(signal, weight):
    buffer = []
    last = signal[0]
    for i in signal:
        smoothed_val = last * weight + (1 - weight) * i
        buffer.append(smoothed_val)
        last = smoothed_val
    return buffer


# In[40]:
def main_train(df_beta, df_train, df, minmax):

    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    sess = tf.compat.v1.Session()
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64, inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    pbar = tqdm(range(10), desc = 'Main train loop')  # Default 500 range
    for i in pbar:
        init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        print("Debugging : ")
        for k in range(0, df_train.shape[0] - 1, timestamp):
            index = min(k + timestamp, df_train.shape[0] - 1)
            print(index)
            batch_x = np.expand_dims(
                df_train.iloc[k : index, :].values, axis = 0
            )

            batch_y = df_train.iloc[k + 1 : index + 1, :].values
            # print("BATCH_X:", batch_x)
            # print("BATCH_Y:", batch_y)
            logits, last_state, __, loss = sess.run(
                [modelnn.logits, modelnn.last_state, modelnn.optimizer, modelnn.cost],
                feed_dict = {
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            init_value = last_state
            total_loss.append(loss)
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        # json_loss.to_json("./loss.json")
        graph_loss.append(np.mean(total_loss))

        # np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost = np.mean(total_loss), min_acc = np.min(total_acc), mean_acc = np.mean(total_acc))

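# [annotation, not in the original source] Training is self-supervised
# next-step regression: each batch_x is a window of `timestamp` scaled rows
# and batch_y is the same window shifted forward one day, so the LSTM learns
# to map every feature vector to the following day's feature vector while the
# recurrent state is carried across consecutive windows within an epoch.
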
def forecast(df_beta, df_train, df, minmax):
    # print("DF_BETA:", df_beta)
    # print("DF_TRAIN:", df_train)
    # tf.compat.v1.variable_scope("AAA", reuse=True)
    tf.compat.v1.reset_default_graph()
    modelnn = Model(
        learning_rate, num_layers, df_beta.shape[1], size_layer, df_beta.shape[1], dropout_rate
    )
    # print("MODELX: ", modelnn.X)
    # print("MODELY: ", modelnn.Y)
    # print("MODELLayer: ", modelnn.hidden_layer)
    sess = tf.compat.v1.Session()
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=64, inter_op_parallelism_threads=64))
    sess.run(tf.compat.v1.global_variables_initializer())
    date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()
    # print("INI___!:", df_train.shape[0] - 1, timestamp)
    # print(df_train.shape[0])
    pbar = tqdm(range(epoch), desc = 'train loop')
    for i in pbar:
        # init_value = np.zeros((1, num_layers * 2 * size_layer))
        total_loss, total_acc = [], []
        print("Debugging : ")
        for k in range(0, df_train.shape[0] - 1, timestamp):
            init_value = np.zeros((1, num_layers * 2 * size_layer))
            index = min(k + timestamp, df_train.shape[0] - 1)
            # print(index)
            batch_x = np.expand_dims(
                df_train.iloc[k : index, :].values, axis = 0
            )

            batch_y = df_train.iloc[k + 1 : index + 1, :].values
            print("BATCH_X:", batch_x)
            print("BATCH_Y:", batch_y)
            logits, last_state, __, loss = sess.run(
                [modelnn.logits, modelnn.last_state, modelnn.optimizer, modelnn.cost],
                feed_dict = {
                    modelnn.X: batch_x,
                    modelnn.Y: batch_y,
                    modelnn.hidden_layer: init_value,
                },
            )
            # print("BATCHX:", batch_x)
            # print("MODELX: ", modelnn.X)
            # print("MODELY: ", modelnn.Y)
            # print("MODELLayer: ", modelnn.hidden_layer)
            # print("OUTSSS:", len(outs))
            # print("opt:", opt)
            # print("outs1", batch_x[0])
            # print("outs2", outs[1])
            # print("outs3", outs[2])
            # print("outs4", outs[3])
            # input()
            init_value = last_state
            total_loss.append(loss)
            # print("LOGITS:", logits[:, 0])
            total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))
        # json_loss.to_json("./loss.json")
        graph_loss.append(np.mean(total_loss))

        # np.save(loss_file, np.array(graph_loss))
        pbar.set_postfix(cost = np.mean(total_loss), min_acc = np.min(total_acc), mean_acc = np.mean(total_acc))
    future_day = test_size

    output_predict = np.zeros((df_train.shape[0] + future_day, df_train.shape[1]))
    output_predict[0] = df_train.iloc[0]
    upper_b = (df_train.shape[0] // timestamp) * timestamp
    init_value = np.zeros((1, num_layers * 2 * size_layer))

    for k in range(0, (df_train.shape[0] // timestamp) * timestamp, timestamp):
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(
                    df_train.iloc[k : k + timestamp], axis = 0
                ),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[k + 1 : k + timestamp + 1] = out_logits

    if upper_b != df_train.shape[0]:
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(df_train.iloc[upper_b:], axis = 0),
                modelnn.hidden_layer: init_value,
            },
        )
        output_predict[upper_b + 1 : df_train.shape[0] + 1] = out_logits
        future_day -= 1
        date_ori.append(date_ori[-1] + timedelta(days = 1))

    init_value = last_state

    for i in range(future_day):
        o = output_predict[-future_day - timestamp + i : -future_day + i]
        out_logits, last_state = sess.run(
            [modelnn.logits, modelnn.last_state],
            feed_dict = {
                modelnn.X: np.expand_dims(o, axis = 0),
                modelnn.hidden_layer: init_value,
            },
        )
        init_value = last_state
        output_predict[-future_day + i] = out_logits[-1]
        date_ori.append(date_ori[-1] + timedelta(days = 1))

    output_predict = minmax.inverse_transform(output_predict)
    deep_future = anchor(output_predict[:, 0], 0.3)
    sess.close()
    sess.__del__()
    return deep_future


# In[41]:


def newaccuration(accepted_results, truetrend):
    hasilutama = 0
    indexbagus = 0
    truest = 0
    predictest = 0
    for i, x in enumerate(accepted_results):
        a = x[-(test_size + 2):]

        # a = x[:((test_size + 2) / 2)]
        print("a", a)
        b = truetrend[-(test_size + 1):]
        # print("b", b)
        hasil = 0
        true = []
        predict = []
        for xy in range(1, len(a)):
            if a[xy] < a[xy - 1]:
                predict.append("Down")
            else:
                predict.append("Up")
            if b[xy] < b[xy - 1]:
                true.append("Down")
            else:
                true.append("Up")

        print(true)
        print(predict)
        for xz in range(len(true)):
            if true[xz] == predict[xz]:
                hasil = hasil + 1
        if hasil > hasilutama:
            hasilutama = hasil
            indexbagus = i
            truest = true
            predictest = predict
    salah = []
    for xz in range(len(truest)):
        if truest[xz] != predictest[xz]:
            salah.append(xz)
            # if xz != 0:
            #     salah.append(xz - 1)
    # print("INI:", b)
    print("TRUEST", truest)
    print("predictest", predictest)
    return hasilutama, indexbagus, salah

# In[42]:


def betaforecast(simulationsize, dfx, dftrain, df, df2, minmax):
    results = []
    for i in range(simulationsize):
        forecast_res = forecast(df, dftrain, dfx, minmax)
        results.append(forecast_res)
    accepted_results = []

    # Redo the forecast while any predicted value falls below the historical
    # minimum close or above twice the historical maximum.
    while not ((np.array(results[0][-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(results[0][-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0):
        print("++++++++++++++++++++++++")
        print("Forecast Recalled...")
        results[0] = forecast(df, dftrain, dfx, minmax)
    return results[0]


# In[43]:


def interval(p1, p2):
    return abs(p1 - p2)


# In[44]:


def checkaccuracy2(true):
    avg = []
    for x in range(len(true) - 7):
        avg.append(interval(true[x], true[x + 1]))
    average = sum(avg) / len(avg)
    return average


# In[45]:


def checkaccuracy(predict, true, filterx, test_size):
    print("True Length: ", len(true))
    print("Predict Length: ", len(predict))

    # avg = []
    # for x in range(len(true) - 5):
    #     avg.append(interval(true[x], predict[x]))
    # average = sum(avg) / len(avg)
    # print("AVG1:", average)
    # print("AVG2:", threshold)

    temp_predict = predict[-test_size:]
    temp_true = true[-test_size:]

    # avg2 = interval(max(predict), min(predict))
    count = 0
    print("------------------------------------")
    for x in range(test_size):
        # acc_var1 = temp_true[x] - (1 / filterx * temp_true[x])
        acc_var1 = temp_true[x] - (filterx / 10)
        acc_var2 = temp_predict[x]
        # acc_var3 = temp_true[x] + (1 / filterx * temp_true[x])
        acc_var3 = temp_true[x] + (filterx / 10)
        acc_condition = acc_var1 <= acc_var2 <= acc_var3
        # print("Var 1 : ", acc_var1)
        # print("Var 2 : ", acc_var2)
        # print("Var 3 : ", acc_var3)
        # print("Day " + str(x + 1) + " " + str(int(acc_var1)) + " " + str(int(acc_var2)) + " " + str(int(acc_var3)) + " : ", acc_condition)
        print("Day " + str(x + 1) + ", Price : " + str(int(temp_true[x])) + ", Gap = " + str(int(abs(temp_predict[x] - temp_true[x]))) + " : ", acc_condition)
        if acc_condition:
            count = count + 1
    print("------------------------------------")
    if count > 7:
        print("Result True")
        return True
    else:
        print("Result False")
        return False

    # if average > threshold:
    #     return False
    # else:
    #     return True


# In[46]:


def findthreshold(simulationsize, dfx, dftrain, df, df2, minmax):
    results = []
    for i in range(simulationsize):
        results.append(forecast(df, dftrain, dfx, minmax))
    accepted_results = []

    for r in results:
        if (np.array(r[-test_size:]) < np.min(dfx['Close'])).sum() == 0 and (np.array(r[-test_size:]) > np.max(dfx['Close']) * 2).sum() == 0:
            accepted_results.append(r)

    finalavg = 999999
    for o in accepted_results:
        avg = []
        for x in range(len(o) - 5):
            avg.append(interval(o[x], df2[x]))
        average = sum(avg) / len(avg)
        if average <= finalavg:
            finalavg = average

    return finalavg

def temp_data(date, xi, resultfinal, df2, date_col, x):
    print("Called . . . ")
    if os.path.isdir("TempData/") == False:
        os.mkdir("TempData/")
    if os.path.isdir("TempData/%s" % x) == False:
        os.mkdir("TempData/%s" % x)
    if os.path.isdir("TempData/%s/" % x + str(date)) == False:
        os.mkdir("TempData/%s/" % x + str(date))
    with open("TempData/%s/" % x + str(date) + "/" + x + str(xi) + ".vezpal2", "w+") as oop:
        main = []
        main.append(resultfinal)  # prediction
        main.append(list(df2['Close']))
        main.append(date_col)
        # main.append(3)
        # main.append([0])
        json.dump(main, oop)

def automaton(simulationsize, date):

    # symbols = ["AAPL"]
    symbols = sys.argv[1]
    times = []
    x = symbols
    # for x in symbols:
    temp_time = []
    temp_time.append(x)
    counter = 0
    validity = 0
    df, date_col = loader(x, test_size, date)
    # print(type(df))
    dfx = df
    df2 = trueloader(x, test_size, date)
    df, minmax = preproc(df)
    dftrain = df
    wrong = [1, 2, 3, 4, 5]
    # avg = checkaccuracy2(list(df2["Close"]))
    # start = time.time()
    # avg = findthreshold(50, dfx, dftrain, df, list(df2["Close"]), minmax)
    # temp_time.append(time.time() - start)
    start = time.time()
    filterx = int(prediction_gap)
    able = False
    print("============== || Initial Train || =============")
    main_train(df, dftrain, dfx, minmax)
    for xi in range(5):
        decision = False
        while decision == False:
            print()
            print("====== [ Forecasting Attempt : " + str(counter + 1) + " ] ===========")
            print("====== [ Progress : " + str(xi) + "/5 ] ")
            resultfinal = betaforecast(simulationsize, dfx, dftrain, df, df2, minmax)
            # validity = valid
            decision = checkaccuracy(resultfinal, list(df2["Close"]), filterx, test_size)
            # wrong = invalid
            if decision == True:
                able = True
                print("ABLE")
                print(str(filterx))
            if counter > 10 and decision != True:
                counter = 0
                filterx = filterx + 10
                print("Filter X new value : " + str(filterx))
            print("Decision Status : ", decision)
            print("**************************************")
            # avg = avg + (1 / 3 * avg)
            if filterx > 1000:
                print("====== [ GG, we gave up ] =====")
                break  # stop retrying this sample once the tolerance has grown past 1000
            counter = counter + 1
        temp_data(date, xi, resultfinal, df2, date_col, x)
        print("[ Loop : " + x + " done ] =========================")
        print()

    if os.path.isdir("Backtest/") == False:
        os.mkdir("Backtest/")
    if os.path.isdir("Backtest/%s" % x) == False:
        os.mkdir("Backtest/%s" % x)
    if os.path.isdir("Backtest/%s/" % x + str(date)) == False:
        os.mkdir("Backtest/%s/" % x + str(date))
    with open("Backtest/%s/" % x + str(date) + "/" + x + str(xi) + ".vezpal2", "w+") as oop:
        main = []
        main.append(resultfinal)  # prediction
        main.append(list(df2['Close']))
        main.append(date_col)
        # main.append(3)
        # main.append([0])
        json.dump(main, oop)
    print("Time for %s :" % x, time.time() - start)
    temp_time.append(time.time() - start)
    times.append(temp_time)
    return times

def predictor(simulationsize, current):
    for x in range(52):
        tf.compat.v1.reset_default_graph()
        current += timedelta(days=7)
        automaton(simulationsize, current)

current_date = date(2020, 1, 1)
if os.path.isdir("Loss/") == False:
    os.mkdir("Loss/")
if os.path.isdir("Loss/" + str(current_date)) == False:
    os.mkdir("Loss/" + str(current_date))

# loss_file = time.strftime("%Y%m%d-%H%M%S")
# loss_file = "Loss/" + str(date.today()) + "/" + loss_file
global_start = time.time()
# profile = cProfile.Profile()

# main_func = "predictor(simulation_size,current_date)"
predictor(simulation_size, current_date)
# ps = pstats.Stats(profile.run(main_func))
print("Overall time consumption ", str(time.time() - global_start))
# ps.dump_stats("./Cprofile_model_01.ps")
5
runner.sh
Executable file
5
runner.sh
Executable file
@ -0,0 +1,5 @@
#!/bin/bash

python3 ./main_model.py PNLF.JK &
sleep 10 &&
python3 ./plot_loss.py &
176
screener.py
Normal file
176
screener.py
Normal file
@ -0,0 +1,176 @@
from pandas_datareader import data as pdr
import json
from yahoo_fin import stock_info as si
from pandas import ExcelWriter
import numpy as np
import matplotlib.pyplot as plt
import yfinance as yf
import pandas as pd
import requests
import datetime
import time
import sys


def roc11(closes):
    print(len(closes))
    roc11_val = []
    roc14_val = []
    roc_sum = []
    x = len(closes)
    for i in range(x):
        if i + 11 == x:  # Fixed
            break
        # cur_indx = x - i
        # temp_11 = (closes[cur_indx] - closes[cur_indx - 11]) / closes[cur_indx - 11] * 100
        temp_11 = (closes[i + 11] - closes[i]) / closes[i] * 100
        roc11_val.append(temp_11)

    for i in range(x):
        if i + 14 == x:  # Fixed
            break
        temp_14 = (closes[i + 14] - closes[i]) / closes[i] * 100
        roc14_val.append(temp_14)

    for i in range(len(roc14_val)):
        roc_sum.append(roc11_val[i + 3] + roc14_val[i])
    print("Finished ")
    return roc_sum

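# [annotation, not in the original source] This builds the input to the
# Coppock curve: ROC(n)[t] = (close[t] - close[t - n]) / close[t - n] * 100,
# and the i + 3 offset lines the 11-day and 14-day series up on the same end
# date, so roc_sum[i] = ROC11 + ROC14 for one day. wma10() below applies the
# 10-period weighted moving average that completes the indicator.
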
def wma10(roc_sum, n=10):
    roc_sum = pd.DataFrame(roc_sum, columns=['COPPOCK'])
    weights = np.arange(1, n + 1)
    wmas = roc_sum.rolling(n).apply(lambda x: np.dot(x, weights) / weights.sum(), raw=True)

    print(wmas)
    return wmas

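# [annotation, not in the original source] The weighted moving average weights
# the most recent value highest: for n = 4 the weights are [1, 2, 3, 4] / 10,
# so wma([a, b, c, d]) = (1a + 2b + 3c + 4d) / 10. Here n = 10 over the summed
# ROC series, matching the standard Coppock definition.
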
yf.pdr_override()

# stocklist = si.tickers_sp500()
stock_file = open("./symbols_backup.txt", "r")
stocklist = stock_file.readlines()
stock_file.close()
index_name = '^GSPC'  # S&P 500

final = []
index = []
n = -1
f = open("./Samples/SHOP/2020-01-22/SHOP0.vezpal2")
ticker = sys.argv[1]
path = './Samples/' + ticker + '/2021-01-30/' + ticker + '0.vezpal2'
path = open(path)
a = json.load(path)

pred_stock = a[0]



n += 1
time.sleep(1)


# RS_Rating
start_date = datetime.datetime.now() - datetime.timedelta(days=365)
end_date = datetime.date.today()
#
# df = pdr.get_data_yahoo(stock, start=start_date, end=end_date)
# df['Percent Change'] = df['Adj Close'].pct_change()
# stock_return = df['Percent Change'].sum() * 100
#
# index_df = pdr.get_data_yahoo(index_name, start=start_date, end=end_date)
# index_df['Percent Change'] = index_df['Adj Close'].pct_change()
# index_return = index_df['Percent Change'].sum() * 100
#
# RS_Rating = round((stock_return / index_return) * 10, 2)
#
# sma = [50, 150, 200]
# for x in sma:
#     df["SMA_" + str(x)] = round(df.iloc[:, 4].rolling(window=x).mean(), 2)

# currentClose = df["Adj Close"][-1]
# moving_average_50 = df["SMA_50"][-1]
# moving_average_150 = df["SMA_150"][-1]
# moving_average_200 = df["SMA_200"][-1]
# low_of_52week = min(df["Adj Close"][-260:])
# high_of_52week = max(df["Adj Close"][-260:])
# closePrice = df["Close"]
closePrice = pred_stock
# print(closePrice.head())

roc_res = roc11(closePrice)
wma_res = wma10(roc_res)
print("==========")
print(len(wma_res))
print(len(closePrice))
print(closePrice.index)
entry_buy = []
entry_sell = []
entry_date = []

# for i in range(len(wma_res)):
#     # mark_1 = sum(wma_res["COPPOCK"][i:i+3])
#     if (i + 6 < len(wma_res)):
#         mark_1 = wma_res["COPPOCK"][i]
#         mark_2 = wma_res["COPPOCK"][i + 3]
#         mark_3 = wma_res["COPPOCK"][i + 6]
#         if (mark_2 < mark_1 and mark_2 < mark_3):
#             # entry_date.append(closePrice.index[-237:][i])
#             entry_buy.append(i)
#             print("Down ", i)
#             # print(entry_date)
#
#         if (mark_2 > mark_1 and mark_2 > mark_3):
#             # entry_date.append(closePrice.index[-237:][i])
#             entry_sell.append(i)
#             print("Up ", i)
#             # print(entry_date)

# i = 0
for i in range(len(wma_res)):
    # mark_1 = sum(wma_res["COPPOCK"][i:i+3])
    if (i + 15 < len(wma_res)):
        mark_1 = wma_res["COPPOCK"][i]
        mark_2 = wma_res["COPPOCK"][i + 8]
        mark_3 = wma_res["COPPOCK"][i + 15]

        mark_h = wma_res["COPPOCK"][i + 10]

        if ((mark_2 * 3 < mark_1) and (mark_2 * 3 < mark_3) and (mark_2 > mark_h)):
            # entry_date.append(closePrice.index[-237:][i])
            entry_buy.append(i + 8)
            print("Down ", i, " ", mark_2)
            # print(entry_date)

        if (mark_2 > mark_1 and mark_2 > mark_3 and mark_2 < mark_h):
            # entry_date.append(closePrice.index[-237:][i])
            entry_sell.append(i + 8)
            print("Up ", i, " ", mark_2)
            # print(entry_date)
    # i = i + 7
    # print(i)

# wma_res['Date'] = closePrice.index[-237:]
ax1 = plt.subplot(211)
plt.title("COPPOCK Indicator " + str(ticker))
# for i in entry_points:
#     plt.vlines(x=closePrice[-237:][i], ymin=0, ymax=max(closePrice))
plt.plot(closePrice, markevery=entry_buy, marker="^", ms=4, mfc="y", linewidth=1)
plt.plot(closePrice, markevery=entry_sell, marker="v", ms=4, mfc="r", linewidth=1)

ax2 = plt.subplot(212, sharex=ax1)
# plt.axhline(y=0, color='r')
# plt.plot(closePrice[-len(wma_res):], wma_res, markevery=entry_sell, marker="o")
plt.plot(range(len(wma_res)), wma_res, markevery=entry_sell, marker="o", mfc='r')
plt.plot(range(len(wma_res)), wma_res, markevery=entry_buy, marker="+", mfc='g', linewidth=0.1)
plt.savefig("./screening_result/pred.png")  # save before show, or the canvas is cleared
plt.show()
# plt.savefig("./screening_result/" + str(stock) + ".png", dpi=1200)
plt.close()



# writer = ExcelWriter("ScreenOutput.xlsx")
# exportList.to_excel(writer, "Sheet1")
# writer.save()
BIN
screener.py.pdf
Normal file
BIN
screener.py.pdf
Normal file
Binary file not shown.
198
screener_2.py
Normal file
198
screener_2.py
Normal file
@ -0,0 +1,198 @@
from pandas_datareader import data as pdr
import json
from yahoo_fin import stock_info as si
from pandas import ExcelWriter
import numpy as np
import matplotlib.pyplot as plt
import yfinance as yf
import pandas as pd
import requests
import datetime
import time
import sys
import glob
import os


def roc11(closes):
    print(len(closes))
    roc11_val = []
    roc14_val = []
    roc_sum = []
    x = len(closes)
    for i in range(x):
        if i + 11 == x:  # Fixed
            break
        # cur_indx = x - i
        # temp_11 = (closes[cur_indx] - closes[cur_indx - 11]) / closes[cur_indx - 11] * 100
        temp_11 = (closes[i + 11] - closes[i]) / closes[i] * 100
        roc11_val.append(temp_11)

    for i in range(x):
        if i + 14 == x:  # Fixed
            break
        temp_14 = (closes[i + 14] - closes[i]) / closes[i] * 100
        roc14_val.append(temp_14)

    for i in range(len(roc14_val)):
        roc_sum.append(roc11_val[i + 3] + roc14_val[i])
    print("Finished ")
    return roc_sum


def wma10(roc_sum, n=10):
    roc_sum = pd.DataFrame(roc_sum, columns=['COPPOCK'])
    weights = np.arange(1, n + 1)
    wmas = roc_sum.rolling(n).apply(lambda x: np.dot(x, weights) / weights.sum(), raw=True)

    print(wmas)
    return wmas

yf.pdr_override()

# stocklist = si.tickers_sp500()
# stock_file = open("./symbols_backup.txt", "r")
# stocklist = stock_file.readlines()
# stock_file.close()
# index_name = '^GSPC'  # S&P 500

final = []
index = []
n = -1


def screener(ticker):
    # ticker = sys.argv[1]
    path = './Samples/' + ticker + '/'
    all_dirs = glob.glob(path + "*")
    print(path)
    print(all_dirs)
    latest_dir = max(all_dirs, key=os.path.getmtime)
    print("Latest Data = ", latest_dir)
    latest_dir = latest_dir + "/"
    listsymbol = os.listdir(latest_dir + '/')
    path = open(latest_dir + listsymbol[0])
    a = json.load(path)

    pred_stock = a[0]
    real_pred = a[1]

    # RS_Rating
    start_date = datetime.datetime.now() - datetime.timedelta(days=(365 * 3) + 13)
    end_date = datetime.date.today()
    #
    df = pdr.get_data_yahoo(ticker, start=start_date, end=end_date)
    real_stock = df["Close"].values
    # real_stock = real_stock.reset_index(drop=True, inplace=True)
    # real_stock = df["Close"].drop("Date")
    print(real_stock)
    print("************")
    closePrice = pred_stock
    # print(closePrice.head())

    roc_res = roc11(closePrice)
    wma_res = wma10(roc_res)
    print("==========")
    print(len(wma_res))
    print(len(closePrice))
    print(closePrice.index)
    entry_buy = []
    entry_sell = []
    entry_date = []


    # i = 0
    entry_buy = []

    val = wma_res["COPPOCK"]
    y = 0
    while (y < len(val) - 4):
        x = val[y]
        z = val[y + 1]

        if x < z:
            x = z

        if x > z and x < val[y + 4]:
            entry_buy.append(y + 1)

        y = y + 1

    y = 0
    while (y < len(val) - 1):
        x = val[y]
        z = val[y + 1]
        if x > z:
            x = z
        if x < z and x > val[y + 1]:
            entry_sell.append(y)
        y = y + 1

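    # [annotation, not in the original source] The two passes above look for
    # local turning points in the Coppock series; the buy pass flags an index
    # where the curve stops falling and sits below its value four steps
    # ahead. In the sell pass the second test compares x against val[y + 1],
    # which is z itself, so `x < z and x > val[y + 1]` can never both hold;
    # a symmetric check against val[y + 4] may have been intended.
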
    mark_zero = []
    for i in range(len(wma_res) - 1):
        mark_1 = wma_res["COPPOCK"][i]
        mark_2 = wma_res["COPPOCK"][i + 1]
        if mark_1 > 0 and mark_2 < 0:
            mark_zero.append(i + 1)

    mark_zero_sell = []
    for i in range(len(wma_res) - 20):
        mark_1 = wma_res["COPPOCK"][i]
        mark_2 = wma_res["COPPOCK"][i + 1]
        if mark_1 < 0 and mark_2 > 0:
            mark_zero_sell.append(i + 1)

    print(entry_buy[-10:])
    for i in range(len(entry_buy)):
        entry_buy[i] = entry_buy[i] + 21

    for i in range(len(mark_zero)):
        mark_zero[i] = mark_zero[i] + 21

    for i in range(len(mark_zero_sell)):
        mark_zero_sell[i] = mark_zero_sell[i] + 21

    for i in entry_sell:
        i = i + 14

    print(entry_buy[-10:])

    # ax1 = plt.subplot(211)
    # plt.title("COPPOCK Indicator " + str(ticker))
    # plt.plot(real_stock[2:], linewidth=2, label="real", linestyle="--")
    # plt.plot(real_pred, linewidth=2, label="real", linestyle="--")
    # plt.plot(closePrice, linewidth=0.2, label="Prediction")
    # plt.legend()
    #
    # ax2 = plt.subplot(212, sharex=ax1)

    fill_front = []
    for i in range(21):
        fill_front.append(np.nan)
    fill_front.extend(wma_res["COPPOCK"])

    wma_res = fill_front

    # plt.plot(range(len(wma_res)), wma_res, markevery=mark_zero_sell, marker="x", mfc='c', linewidth=0.8)
    # plt.plot(range(len(wma_res)), wma_res, markevery=mark_zero, marker="x", mfc='c', linewidth=0.8)
    # plt.plot(range(len(wma_res)), np.zeros(len(wma_res)), linewidth=0.8)
    #
    # plt.show()
    # plt.savefig("./screening_result/pred.png")
    # plt.close()
    indicator = []
    indicator.append(wma_res)
    indicator.append(np.zeros(len(wma_res)))
    markers = []
    markers.append(mark_zero)
    markers.append(mark_zero_sell)
    stocks = []
    stocks.append(real_pred)
    stocks.append(closePrice)
    return indicator, markers, stocks


# screener("MA")


# writer = ExcelWriter("ScreenOutput.xlsx")
# exportList.to_excel(writer, "Sheet1")
# writer.save()
3
stocks.csv
Normal file
3
stocks.csv
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
,Company,Index
|
||||||
|
0,"SOXL
|
||||||
|
",27
|
|
11
symbols.txt
Normal file
11
symbols.txt
Normal file
@ -0,0 +1,11 @@
AAPL
SHOP
MELI
GOOGL
FB
NEE
CMCSA
MA
TSLA
AMD
XAR
39
symbols_backup.txt
Normal file
39
symbols_backup.txt
Normal file
@ -0,0 +1,39 @@
AAPL
ADBE
ADP
AMT
APD
AWK
AXP
BLL
BR
CCEP
CHTR
CMCSA
CSGP
FICO
GPN
GOOGL
HSY
LBRDK
MA
MAA
MSFT
NEE
NOC
PG
PLNT
PYPL
SNPS
SOXL
SYY
VRSN
WEC
SHOP
KEYS
DG
HON
ACN
TMO
PAYC
IHI
127
temp_plotter.py
Normal file
127
temp_plotter.py
Normal file
@ -0,0 +1,127 @@
import matplotlib.pyplot as plt
import json
import os
from datetime import datetime, date, timedelta
from matplotlib.dates import date2num
import yfinance as yf
import sys
import glob
import numpy as np
# path = '../Febrian/Bang Nino/Samples/NEE/2020-05-08/'

my_args = sys.argv[1:]
tick = my_args[0]
filter = int(my_args[1])
visual = 100
print(tick)
# path = './Samples/' + tick + '/2020-12-07/'
path = './TempData/' + tick + '/'
all_dirs = glob.glob(path + "*")
print(path)
print(all_dirs)
latest_dir = max(all_dirs, key=os.path.getctime)
print("Latest Data = ", latest_dir)
latest_dir = latest_dir + "/"
listsymbol = os.listdir(latest_dir + '/')
print(listsymbol)

with open(latest_dir + listsymbol[0], 'r') as f:
    print(latest_dir + listsymbol[0])
    hasil = json.load(f)
    # print(hasil)
    print(len(hasil[0]))
    print(len(hasil[1]))
    print(len(hasil[2]))
akhir = datetime.strptime(str(hasil[2][-1]), '%Y-%m-%d').date()
print("Current Log : ", akhir)
extend1 = [akhir + timedelta(days=x) for x in range(1, 50, 1)]
extend = []

for x in extend1:
    if x.weekday() not in [5, 6]:
        extend.append(x)

extend = extend[:14]
extend = [str(x) for x in extend]
print("Extended dates:", extend)
# date = date2num(date)
date = hasil[2]
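# [annotation, not in the original source] The forecast horizon is extended
# by calendar days with weekends (weekday 5 and 6) skipped, so `extend` holds
# the next 14 trading-day date strings after the last logged date `akhir`
# (Indonesian for "end").
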
plt.plot(hasil[1], color='grey', linewidth=3, linestyle='--', marker='x', alpha=0.8, label="Confirmation")
plt.plot(hasil[2], hasil[1][0:-5], color='b', linewidth=2)
# plt.show()

symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=30), interval='1d')
# print(symbol)
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()
# plt.plot(extend, symbol, color='g', linewidth=10)
avg = []
for ex, x in enumerate(listsymbol):
    with open(latest_dir + x) as f:
        print(x)
        hasil = json.load(f)
    a = hasil[0][-10:][:14]
    # a = hasil[0][-14:]
    b = hasil[1][-14:]
    print("A & B Temp :")
    print(a)
    print(b)
    count = 0
    for x in range(7):
        if (b[x] - (filter / 10) <= a[x] <= b[x] + (filter / 10)):
            count = count + 1
    if count > 5:
        print("HORE")
        avg.append(hasil[0][-14:])
    # plt.plot(date + extend, hasil[0], label='Sample %s' % ex, alpha=0.3)
# print(avg)
print("#################################################")
print(avg)
print(" ")
print("#################################################")
avg_total = []
for x in range(14):
    temp = []
    for a in range(len(avg)):
        temp.append(avg[a][x])
    # avg_total.append(sum(temp) / len(temp))
    avg_total.append(np.mean(temp))

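# [annotation, not in the original source] avg_total is the element-wise mean
# of every sample forecast that passed the same +/- filter/10 tolerance used
# by checkaccuracy in the model scripts. atas/bawah (upper/lower) below form
# the plotted band; with visual = 100 it is +/- 1% around that mean.
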
# print(avg_total)

atas = [x + (1 / visual * x) for x in avg_total]
bawah = [x - (1 / visual * x) for x in avg_total]
print([hasil[1][0:-5][-1]] + bawah)
print([hasil[1][0:-5][-1]] + atas)
print([date[-1]] + extend)
plt.fill_between([date[-1]] + extend, [hasil[1][0:-5][-1]] + bawah, [hasil[1][0:-5][-1]] + atas, alpha=0.2, label='Prediction Band')
plt.plot([date[-1]] + extend, [hasil[1][-5]] + avg_total, color='y', linewidth=1, label='Prediction', marker='x')
plt.grid()
# plt.show()

# print("Date Extended ", (date + extend))
symbol = yf.Ticker(tick)
symbol = symbol.history(start=akhir, end=akhir + timedelta(days=20), interval='1d')
print(symbol)
symbol = symbol.drop(symbol.index[0])
symbol = symbol.drop(symbol.index[0])
symbol = symbol['Close'][0:14].tolist()
# symbol = symbol['Close'].tolist()
# print(symbol)
# plt.plot(extend[0:len(symbol)], symbol, color='g', label='Actual', linewidth=1)
# plt.plot(hasil[1], color='r', linestyle='--', label='Confirmation', linewidth=2)
plt.plot(hasil[2] + extend[0:14], hasil[0], color='b', label='Train', linewidth=2, alpha=0.4)
# plt.plot(symbol, label="Symbol Real")
# plt.plot(hasil[0], label="Real Prediction")
# print(symbol)
# print(symbol)
# detail = str(akhir) + "\n" + "Prediction :" + str(avg_total[-1:]) + "\n" + "Real : "
# plt.text(0.05, 120, detail, color='black', bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.01),
           fancybox=True, shadow=True, ncol=7)
plt.title(tick + " Date: " + str(hasil[2][-1]) + " to " + str(extend[-1]))
# plt.get_xaxis().set_ticks([])
plt.xticks([])
plt.show()
BIN
temp_pred.png
Normal file
BIN
temp_pred.png
Normal file
Binary file not shown.
589
torch_model.py
Normal file
589
torch_model.py
Normal file
@ -0,0 +1,589 @@
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import numpy as np
import time, math
import yfinance as yf
from indicator_MACD import *
import os


def preproc(df):
    minmax = MinMaxScaler().fit(df.iloc[:, 2:5].astype('float32'))  # Close, Volume, and all
    df_log = minmax.transform(df.iloc[:, 2:5].astype('float32'))  # Close, Volume, and all
    df_log = pd.DataFrame(df_log)
    df_log.head()
    input()
    return df_log, minmax

symbol = yf.Ticker(input("Enter the Ticker : "))
data = symbol.history(interval="1d", period="4y")
data = data[:-7]
real_data = data.iloc[:, 3]
real_data = pd.DataFrame(real_data)
# real_data = real_data["Close"]
print("")
print("Real Data ")
print(real_data[-7:])

tick_macd = macd_data(real_data)
plt.subplot(2, 1, 1)
ax = plt.plot(real_data)
ax = plt.bar(tick_macd.index, tick_macd['Gap'], width=0.8)

plt.subplot(2, 1, 2)
ax = plt.bar(tick_macd.index, tick_macd['Gap'], width=0.8)

plt.show()

df = pd.DataFrame(data)
# df = tick_macd
# df.index = pd.to_datetime(df.index)
# print((df))


# df = pd.read_csv("./data/AAPL_2020-10-31.csv", header=0, index_col=0)
# df = df[-365:]
# df = df.iloc[:, 3]
# df = df["Close"]
print(data.head())
print(df.head())
# input()
# print(df)
# plt.plot(df)
# plt.show()
# ktest_size = 300
# ktest_data = df[:-test_size]
# ktrain_data = df[-test_size:]

# scaler = MinMaxScaler()
df_test = df
df, scaler = preproc(df)
# scaler = scaler.fit(np.expand_dims(df, axis=1))
# df = scaler.transform(np.expand_dims(df, axis=1))

# train_data = scaler.transform(np.expand_dims(train_data, axis=1))
# test_data = scaler.transform(np.expand_dims(test_data, axis=1))

class LSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        out, (hnn, cnn) = self.lstm(x, (h0.detach(), c0.detach()))
        out = self.fc(out[:, -1, :])
        return out


class GRU(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(GRU, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.gru = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        out, hn = self.gru(x, h0.detach())
        out = self.fc(out[:, -1, :])
        return out

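# A quick sanity-check sketch (not part of the original flow): push a random
# batch through the GRU to confirm the expected (batch, output_dim) shape.
# The dimensions mirror the ones used in forecasting() below.
_check = GRU(input_dim=3, hidden_dim=128, num_layers=2, output_dim=3)
print(_check(torch.randn(4, 7, 3)).shape)  # expected: torch.Size([4, 3])
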
def split_window_mod(my_data, lookback):
    data_raw = my_data.to_numpy()
    # data_raw = my_data
    data = []

    # Sliding windows of length lookback+1: the first `lookback` rows are the
    # input sequence, the final row is the target
    for index in range(len(data_raw) - lookback):
        data.append(data_raw[index : index + lookback + 1])
        # print(data[-1:])

    print("Shape : ", len(data[0]))
    print(data[-3:-1])
    input()  # debugging pause

    # data = pd.DataFrame(data)
    # data = data.iloc[:, 1:5]
    data = np.array(data)
    print("Current Data ")
    print(data)
    input()  # debugging pause

    # print(data[0])
    # print(data[1])
    test_set_size = int(np.round(0.3 * data.shape[0]))
    train_set_size = data.shape[0] - test_set_size
    print("TRAIN:", train_set_size)
    print("TEST:", test_set_size)

    x_train = data[:train_set_size, :-1, :]
    print("")
    print(" ============== X_train ============== ")
    print(x_train[-2:])
    print("")
    input()

    # Training targets come from the *next* window, so y is one step further ahead
    y_train = data[1:train_set_size, -1, :]
    print("")
    print(" ============== y_train ============== ")
    print(y_train[-2:])
    print("")
    input()
    print("Current Data Y")
    # print(x_train[0])
    print(y_train[0])
    print(y_train[1])

    x_test = data[train_set_size:, :-1]
    print("")
    print(" ============== X_test ============== ")
    print(x_test[-2:])
    print("")
    input()

    # Problematic slicing: unlike y_train, y_test is not shifted by one
    # window, so the test targets come from the same window as their inputs
    y_test = data[-test_set_size:-1, -1, :]
    print("")
    print(" ============== y_test ============== ")
    print(y_test[-2:])
    print("")
    input()
    print("Current Data ")
    # print(x_test[0])

    # Trim x to re-align lengths with the shifted/shortened y arrays
    x_train = x_train[:-1]
    x_test = x_test[:-1]
    print(len(x_train))
    print(len(y_train))
    print(len(x_test))
    print(len(y_test))

    return [x_train, y_train, x_test, y_test]

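# A toy illustration of the windowing scheme above (a standalone sketch on
# synthetic data, not part of the original flow): with lookback=3, each
# sample holds 3 input rows and 1 target row.
_toy = pd.DataFrame({'a': range(10), 'b': range(10, 20), 'c': range(20, 30)})
_raw = _toy.to_numpy()
_windows = np.array([_raw[i:i + 4] for i in range(len(_raw) - 3)])
print(_windows.shape)        # (7, 4, 3): 7 samples, 4 rows each, 3 features
print(_windows[0, :-1, :])   # inputs: rows 0..2
print(_windows[0, -1, :])    # target: row 3
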
def split_window(my_data, lookback):
    data_raw = my_data.to_numpy()
    data = []

    for index in range(len(data_raw) - lookback):
        data.append(data_raw[index : index + lookback])

    # print(data)
    data = np.array(data)
    print("Current Data ")
    test_set_size = int(np.round(0.1 * data.shape[0]))
    train_set_size = data.shape[0] - test_set_size

    x_train = data[:train_set_size, :-1, :]
    y_train = data[:train_set_size, -1, :]

    x_test = data[train_set_size:, :-1]
    y_test = data[train_set_size:, -1, :]

    return [x_train, y_train, x_test, y_test]

def window(data, seq_length):
    xs = []
    ys = []
    # print(data)
    for i in range(len(data) - seq_length - 1):
        x = data[i:(i + seq_length)]
        try:
            # target is 7 steps beyond the end of the input window
            y = data[i + 7 + seq_length]
        except IndexError:
            break
        xs.append(x)
        ys.append(y)
    # plt.plot(ys)
    # plt.plot(ys[0]+xs[0])
    # plt.show()
    print(xs[0])
    print(ys[0])
    print("------------")
    print(data[29])
    print(data[37])
    # input()
    return np.array(xs), np.array(ys)

def forecasting():
    print("Forecasting . . .")

    input_dim = 3
    hidden_dim = 128
    num_layers = 2
    output_dim = 3
    num_epochs = int(input("Epochs for training : "))
    lookback = 7

    # CUDA (note: `device` is computed but the model and tensors below are
    # never moved to it, so training still runs on CPU)
    is_cuda = torch.cuda.is_available()
    if is_cuda:
        device = torch.device("cuda")
        torch.cuda.set_device(0)
    else:
        device = torch.device("cpu")

    # REPLACED
    # seq_length = 7
    # X_train, y_train = window(train_data, seq_length)
    # X_test, y_test = window(test_data, seq_length)

    # X_train, y_train, X_test, y_test = split_window(df, lookback)
    # a, b, c, d = split_window_mod(df_test, lookback)
    X_train, y_train, X_test, y_test = split_window_mod(df, lookback)

    print(X_test.shape)
    # input()

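    # A minimal sketch of how the computed device could actually be applied
    # once the model and tensors exist below (kept commented; not part of
    # the original flow):
    # model = model.to(device)
    # X_train, y_train = X_train.to(device), y_train.to(device)
    # X_test, y_test = X_test.to(device), y_test.to(device)
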
    # Fill NaN for adjusted forecasting (earlier experiments, kept commented)
    # filler_X_train = np.empty_like(X_train)
    # filler_X_train[:, :] = np.nan
    # X_train = np.insert(X_train, 0, X_train[7:14], axis=0)
    print("")
    print("Fill Attempt X_train ====================================== ")
    print(X_train[:8])
    input()

    # filler_y_train = np.empty_like(y_train)
    # filler_y_train[:, :] = np.nan
    # y_train = np.append(y_train, y_train[-7:], axis=0)
    print("")
    print("Fill Attempt y_train ====================================== ")
    print(y_train[-8:])
    input()

    # print("")
    # print("Fill Attempt X_test ====================================== ")
    # print(X_test.shape)
    # filler_X_test = X_train[-7:]
    # filler_X_test = np.empty_like(X_test)
    # filler_X_test[:, :] = np.nan
    # X_test = np.insert(X_test, 0, X_test[7:14], axis=0)
    # print(X_test.shape)
    # print(X_test[:7])
    # input()

    # filler_y_test = np.empty_like(y_test)
    # filler_y_test[:, :] = np.nan
    # y_test = np.append(y_test, filler_y_test[-7:], axis=0)
    # print("")
    # print("Fill Attempt y_test ====================================== ")
    # print(y_test[-7:])
    # input()

    # Convert the numpy windows to float tensors
    X_train = torch.from_numpy(X_train).type(torch.Tensor)
    y_train = torch.from_numpy(y_train).type(torch.Tensor)
    X_test = torch.from_numpy(X_test).type(torch.Tensor)
    y_test = torch.from_numpy(y_test).type(torch.Tensor)

    # X_train = torch.from_numpy(X_train).float()
    # y_train = torch.from_numpy(y_train).float()
    # X_test = torch.from_numpy(X_test).float()
    # y_test = torch.from_numpy(y_test).float()

    # X_test = torch.from_numpy(np.append(X_test, future_days)).float()
    # y_test = torch.from_numpy(np.append(y_test, future_days)).float()

    print(X_test.shape)
    # input()

    # TensorBoard setup
    writer = SummaryWriter()

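    # Usage note (not in the original flow): SummaryWriter logs to ./runs by
    # default, so the loss curves recorded below can be inspected with
    #   tensorboard --logdir runs
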
    model = GRU(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
    # model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)

    criterion = torch.nn.MSELoss(reduction='mean')
    optimiser = torch.optim.Adam(model.parameters(), lr=0.01)

    hist = np.zeros(num_epochs)
    start_time = time.time()
    gru = []

    print("X_train : ")
    print(X_train[-1:])
    input()
    # Full-batch training loop
    for t in range(num_epochs):
        y_train_pred = model(X_train)
        loss = criterion(y_train_pred, y_train)
        print("Epoch ", t + 1, "MSE: ", loss.item())

        # y_test_pred = model(X_test)
        # loss_valid = criterion(y_test_pred, y_test)
        # print("-------------------- Validation MSE : ", loss_valid.item())

        hist[t] = loss.item()
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()
        writer.add_scalar("Loss/Train", loss.item(), t)
        # writer.add_scalar("Validation/Train", loss, t)

    training_time = time.time() - start_time
    print("Time Spent : {}".format(training_time))

    # Un-scale the fitted values and plot them against the training targets,
    # with the loss curve alongside
    predict = pd.DataFrame(scaler.inverse_transform(y_train_pred.detach().numpy()))
    original = pd.DataFrame(scaler.inverse_transform(y_train.detach().numpy()))

    plt.subplot(1, 2, 1)
    plt.plot(original.index, original[0])
    plt.plot(predict.index, predict[0])

    plt.subplot(1, 2, 2)
    plt.plot(hist)
    plt.show()

    # Predict from test data. Note that this loop also backpropagates, i.e.
    # the model keeps training on the test windows rather than only
    # evaluating them
    print("================== Test Data Specification ===========")
    print(X_test[-1:])
    print(X_test.shape)
    for t in range(int(np.round(0.5 * num_epochs))):
        y_test_pred = model(X_test)
        loss = criterion(y_test_pred, y_test)
        print("Epoch ", t + 1, "MSE: ", loss.item())

        # y_test_pred = model(X_test)
        # loss_valid = criterion(y_test_pred, y_test)
        # print("-------------------- Validation MSE : ", loss_valid.item())

        hist[t] = loss.item()
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()
        writer.add_scalar("Loss/Test", loss.item(), t)

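    # A sketch of what a pure evaluation pass would look like instead of the
    # training loop above (an alternative, not the original behaviour):
    # model.eval()
    # with torch.no_grad():
    #     y_test_pred = model(X_test)
    #     print("Held-out MSE:", criterion(y_test_pred, y_test).item())
    # model.train()
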
    # Un-scale test targets and test predictions, then compare as bar plots
    y_test = scaler.inverse_transform(y_test.detach().numpy())
    test_predict = pd.DataFrame(scaler.inverse_transform(y_test_pred.detach().numpy()))
    # test_original = pd.DataFrame(scaler.inverse_transform(y_test.detach().numpy()))
    test_original = pd.DataFrame(y_test)
    print(test_original)
    print(test_predict[:10])
    input()

    # plt.plot(test_original.index, test_original[0], label="Real Data")
    # plt.plot(test_predict.index, test_predict[0], marker=".", label="Prediction")
    # Bar
    plt.bar(test_original.index, test_original[0], label="Real Data")
    plt.bar(test_predict.index, test_predict[0], label="Prediction")
    plt.title("Test Result")
    plt.show()

    # Invert prediction results back to price scale
    y_train_pred = scaler.inverse_transform(y_train_pred.detach().numpy())
    y_train = scaler.inverse_transform(y_train.detach().numpy())

    y_test_pred = scaler.inverse_transform(y_test_pred.detach().numpy())

    # Calculate root mean squared error (note: the original second print
    # reported trainScore for both scores)
    # trainScore = math.sqrt(mean_squared_error(y_train[:,0], y_train_pred[:,0]))
    # testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0]))
    # print("Train Score: %.2f " % (trainScore))
    # print("Test Score: %.2f " % (testScore))

    # gru.append(trainScore)
    # gru.append(testScore)
    # gru.append(training_time)

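    # A minimal sketch reconstructing the commented-out scoring above,
    # assuming the arrays keep the shapes produced in this function
    # (RMSE on the first feature):
    train_rmse = math.sqrt(mean_squared_error(y_train[:, 0], y_train_pred[:, 0]))
    test_rmse = math.sqrt(mean_squared_error(y_test[:, 0], y_test_pred[:, 0]))
    print("Train RMSE: %.2f" % train_rmse)
    print("Test RMSE: %.2f" % test_rmse)
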
    # Extend df with 7 NaN rows so future predictions have somewhere to land
    filler_df = np.empty_like(df)
    filler_df[:, :] = np.nan
    df = np.append(df, filler_df[-7:], axis=0)

    trainPredictPlot = np.empty_like(df)
    trainPredictPlot[:, :] = np.nan
    print("Train reference:")
    print("[1]", str(len(y_train_pred) + lookback))
    print("[2]", str(len(df) - 1))
    print("[ y_train_pred ] ", str(len(y_train_pred)))
    # input()
    print("")
    trainPredictPlot[:len(y_train_pred), :] = y_train_pred
    # trainPredictPlot[lookback:len(y_train_pred)+lookback, :] = y_train_pred
    print("y_test_pred shape ", y_test_pred.shape)

    # Stacked prediction: feed the model its own output to step one day
    # past the end of the test set
    rrr_predict = X_test[len(X_test) - 1][-(lookback - 1):]
    rrr_predict = np.expand_dims(rrr_predict.numpy(), axis=0)
    rrr_predict = torch.from_numpy(rrr_predict).type(torch.Tensor)
    xxx_predict = model(rrr_predict)
    vvv_predict = X_test[len(X_test) - 1][-(lookback - 1):]
    print()
    print("|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
    print("Real data ")
    print(X_test[-1:])
    print("input data: ")
    print(vvv_predict)
    print("Prediction Result: ")
    print(xxx_predict)
    zzz_predict = np.append(vvv_predict.detach().numpy(), xxx_predict.detach().numpy(), axis=0)
    print("new X_test n+1 ")
    print(zzz_predict)
    zzz_predict = np.expand_dims(zzz_predict, axis=1)  # axis=1 to get prediction results for 7 days straight
    print(zzz_predict)
    aaa_predict = torch.from_numpy(zzz_predict).type(torch.Tensor)
    print("n+1+1 prediction ")
    aaa_predict = model(aaa_predict)
    # tmp_future = np.empty_like(aaa_predict)
    # tmp_future = np.append(tmp_future, aaa_predict.detach().numpy(), axis=0)
    # print(tmp_future)

    # Earlier attempt at looping the stacked prediction, kept commented:
    # for x in range(lookback):
    #     tmp_val = tmp_future[-(lookback-1):]
    #     aaa_predict = np.expand_dims(aaa_predict.detach().numpy(), axis=0)
    #     aaa_predict = torch.from_numpy(tmp_val).type(torch.Tensor)
    #     aaa_predict = model(aaa_predict)
    #     print(aaa_predict)
    #     # tmp_future = np.append(tmp_future[0], aaa_predict[0], axis=0)

    aaa_future = scaler.inverse_transform(aaa_predict.detach().numpy())
    y_test_pred = np.append(y_test_pred, aaa_future, axis=0)
    print(y_test_pred[-2:])
    # plt.plot(aaa_future)
    # plt.show()
    print()
    print("|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
    print()
    input()
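    # A minimal sketch of the iterative multi-step forecast the commented
    # loop above was aiming at (an assumption: the last test window is rolled
    # forward one model step at a time for `lookback` future days):
    window_roll = X_test[-1].clone()            # (lookback, input_dim)
    future_steps = []
    for _ in range(lookback):
        step = model(window_roll.unsqueeze(0))  # (1, output_dim)
        future_steps.append(step.detach().numpy()[0])
        window_roll = torch.cat([window_roll[1:], step.detach()], dim=0)
    print(scaler.inverse_transform(np.array(future_steps)))
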
    predict_test = pd.DataFrame(y_train_pred)
    original_test = pd.DataFrame(y_train)

    testPredictPlot = np.empty_like(df)
    testPredictPlot[:, :] = np.nan
    print("Our problem child:")
    print("[1]", str(len(y_test_pred) + lookback - 1))
    print("[2]", str(len(df) - 1))
    print("[3]", str(len(testPredictPlot)))
    print("[ y_test_pred ] ", str(len(y_test_pred)))
    print(testPredictPlot.shape)
    print(testPredictPlot[-10:])
    print("")
    # testPredictPlot[len(y_train_pred)+lookback-1:len(df)-3, :] = y_test_pred
    # Hard-coded -16 offset aligns the test block after the train block
    testPredictPlot[len(y_train_pred):len(df) - 16, :] = test_predict

    # Buffer twice as wide as df, intended for the future block
    fill_tail = np.empty_like(df)
    fill_tail[:, :] = np.nan
    z_future_pred = np.append(fill_tail, fill_tail, axis=1)
    # z_future_pred = np.append(z_future_pred, fill_tail, axis=1)
    z_future_pred = np.empty_like(z_future_pred)  # same shape; values overwritten below
    print(fill_tail.shape)
    print(z_future_pred.shape)
    # print(z_future_pred[-10:])

    X_future = np.empty_like(X_test[:1])
    print("========= Future Days =======")
    X_future[:, :] = np.nan
    print(" Original shape : ", X_future.shape)
    print(X_test[-1:])

    X_future = np.append(X_test, X_future[lookback:], axis=0)

    # print(X_future[-10:])
    print("After append values : ", X_future.shape)
    X_future = torch.from_numpy(X_future[:lookback]).type(torch.Tensor)
    print("========= Future Days =======")
    print(X_future[-1:])
    print(X_future.shape)
    z_future_pred = model(X_future)
    z_future_pred = scaler.inverse_transform(z_future_pred.detach().numpy())
    # future_predictPlot[len(testPredictPlot)] =

    original_df = scaler.inverse_transform(df)

    # Place the stacked 7-day forecast in the last `lookback` rows
    bbb_future = np.empty_like(df)
    bbb_future[:, :] = np.nan
    bbb_future[-lookback:, :] = aaa_future

    # Stack all plot buffers side by side: columns 0-2 train, 3-5 test,
    # 6-8 original, 9-11 future forecast
    predictions = np.append(trainPredictPlot, testPredictPlot, axis=1)
    predictions = np.append(predictions, original_df, axis=1)
    predictions = np.append(predictions, bbb_future, axis=1)
    # predictions = np.append(predictions, z_future_pred, axis=1)
    result = pd.DataFrame(predictions)
    print(result[-6:])
    print(real_data[-6:])
    input()
    # plt.subplot(2, 1, 2)
    for i in range(1):  # single-pass loop kept from interactive experiments
        plt.title("Train and Validation")
        # plt.plot(result.index, result[int(input("Index for Original : "))], label="Original", color="gray", linestyle="--", linewidth=2, alpha=0.3)
        # plt.plot(result.index, result[int(input("Index for Train : "))], label="Train", color="blue", marker=".", linewidth=1)
        # plt.plot(result.index, result[int(input("Index for Test : "))], label="Test", color="red", marker=".", linewidth=1)
        # plt.plot(result.index, result[int(input("Index for Test : "))], label="AAA", color="yellow", marker="o", linewidth=1)

        # Bar
        # plt.bar(result.index, result[int(input("Index for Original : "))], label="Original", color="gray", alpha=0.3)
        # plt.bar(result.index, result[int(input("Index for Train : "))], label="Train", color="blue")
        # plt.bar(result.index, result[int(input("Index for Test : "))], label="Test", color="red")
        # plt.bar(result.index, result[int(input("Index for Test : "))], label="AAA", color="cyan")

        plt.bar(result.index, result[8], label="Original", color="gray", alpha=0.3)
        plt.bar(result.index, result[2], label="Train", color="blue")
        plt.bar(result.index, result[5], label="Test", color="red")
        plt.bar(result.index, result[11], label="Prediction", color="cyan")
        # plt.plot(result.index, result[3], color="red", marker=".", linewidth=1)
        # plt.xticks(range(0,data.shape[0],500),df['Close'][0].loc[::500],rotation=45)
        plt.legend()
        plt.show()

    filler = np.empty_like(y_test_pred[:lookback - 2])
    # filler[:, :] = np.nan
    print("====== Simple Forecasting =========")
    future_all = np.append(filler, y_train_pred, axis=0)
    future_all = np.append(future_all, y_test_pred, axis=0)
    future_all = np.append(future_all, z_future_pred, axis=0)
    future_all = pd.DataFrame(future_all)
    # plt.plot(original_df)
    # (note: original_df is an ndarray here, so [6] selects row 6, not a column)
    plt.plot(original_df[6], color="gray", linestyle="--", linewidth=2, alpha=0.3)
    print(future_all.head())
    plt.plot(future_all.index, future_all[0], marker=".")
    plt.title("Original data & Train+Test+Prediction Data")

    # plt.plot(future_all.index, future_all[1], marker=".")
    # plt.plot(future_all.index, future_all[2], marker=".")
    plt.show()
    print(result.tail())
    # Persist both the raw state_dict and a TorchScript export
    if not os.path.exists("./models"):
        os.makedirs("./models")
    torch.save(model.state_dict(), "./models/my_model")
    m = torch.jit.script(model)
    m.save("./models/my_model.pt")
    # writer.add_graph(model)
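    # A minimal sketch of loading the artifacts back (an assumption about
    # later use; not in the original flow): restore the state_dict into a
    # fresh GRU, or load the TorchScript module directly.
    # reloaded = GRU(input_dim=3, hidden_dim=128, num_layers=2, output_dim=3)
    # reloaded.load_state_dict(torch.load("./models/my_model"))
    # reloaded.eval()
    # scripted = torch.jit.load("./models/my_model.pt")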