Ajout stats

This commit is contained in:
Jérôme Delacotte
2025-05-25 20:22:09 +02:00
parent e07fdb38da
commit 26e91d1bc7
2 changed files with 163 additions and 213 deletions

View File

@@ -84,18 +84,6 @@ class Zeus_8_3_2_B_4_2(IStrategy):
},
"sma10": {
"color": "blue"
},
"min12_1d": {
"color": "red"
},
"max12_1d": {
"color": 'red'
},
"min50": {
"color": 'green'
},
"max50": {
"color": 'green'
}
},
"subplots": {
@@ -205,144 +193,36 @@ class Zeus_8_3_2_B_4_2(IStrategy):
protection_fibo = IntParameter(1, 10, default=2, space='protection')
sell_allow_decrease = DecimalParameter(0.005, 0.02, default=0.2, decimals=2, space='sell', optimize=True, load=True)
# Probabilité de hausse pour futur_percent_3h (en %):
# mid_smooth_1h_deriv1_bin B5 B4 B3 B2 B1 N0 H1 H2 H3 H4 H5
# sma24_deriv1_1h_bin
# B5 41.0 47.2 48.1 45.6 74.0 65.9 66.5 83.8 77.8 72.1 81.0
# B4 41.2 35.8 48.4 46.5 59.9 60.2 75.8 79.4 84.6 83.0 78.5
# B3 34.1 39.7 42.8 47.0 63.3 64.5 71.5 80.4 82.0 86.6 76.6
# B2 27.5 27.9 32.3 33.2 61.9 67.1 70.8 79.5 81.3 73.6 81.9
# B1 35.0 26.5 24.4 34.9 50.0 59.2 69.4 72.8 79.8 77.4 69.5
# N0 30.6 19.9 23.6 30.8 41.9 59.2 67.5 70.6 74.0 63.0 75.0
# H1 25.2 28.7 28.6 25.8 35.9 44.2 60.1 68.8 67.7 69.6 80.9
# H2 29.8 20.8 23.9 30.4 34.4 37.5 52.7 66.1 69.8 67.5 62.9
# H3 25.7 29.4 22.7 29.8 37.7 47.1 59.9 68.5 66.5 68.6 66.4
# H4 30.6 27.5 25.1 22.6 30.8 34.1 50.9 59.8 57.0 68.6 63.7
# H5 14.8 21.6 22.2 35.3 19.3 31.6 38.3 59.6 65.2 56.8 59.6
labels = ['B5', 'B4', 'B3', 'B2', 'B1', 'N0', 'H1', 'H2', 'H3', 'H4', 'H5']
index_labels = ['B5', 'B4', 'B3', 'B2', 'B1', 'N0', 'H1', 'H2', 'H3', 'H4', 'H5']
# Récupération des labels ordonnés
ordered_labels = ['B5', 'B4', 'B3', 'B2', 'B1', 'N0', 'H1', 'H2', 'H3', 'H4', 'H5']
label_to_index = {label: i for i, label in enumerate(ordered_labels)}
# Données sous forme de dictionnaire
# Bornes des quantiles pour
mid_smooth_24_deriv1_bins = [-37.4852, -0.7541, -0.4233, -0.2510, -0.1338, -0.0389, 0.0496, 0.1464, 0.2660, 0.4384, 0.7697, 48.2985]
sma144_deriv1_bins = [-0.2592, -0.0166, -0.0091, -0.0051, -0.0025, -0.0005, 0.0012, 0.0034, 0.0062, 0.0105, 0.0183, 0.2436]
smooth24_sma144_deriv1_matrice = {
'B5': [8.2, 4.1, 3.1, 3.4, 3.5, 3.0, 2.9, 2.8, 2.5, 3.0, 4.1],
'B4': [24.9, 13.5, 11.8, 11.0, 9.0, 9.3, 9.1, 8.9, 8.1, 7.8, 11.4],
'B3': [39.8, 24.7, 20.4, 18.8, 17.4, 16.0, 16.2, 14.7, 15.4, 15.5, 15.9],
'B2': [54.8, 40.6, 32.7, 28.3, 25.9, 24.3, 23.1, 24.0, 23.4, 24.2, 21.1],
'B1': [65.1, 52.9, 46.6, 44.7, 38.8, 37.7, 35.4, 33.6, 32.2, 33.1, 27.4],
'N0': [73.1, 62.9, 61.1, 59.0, 56.1, 52.4, 49.5, 48.5, 42.7, 39.9, 35.3],
'H1': [79.7, 72.5, 73.1, 72.6, 71.6, 69.8, 66.9, 63.8, 58.0, 53.2, 41.9],
'H2': [81.7, 79.8, 79.6, 80.8, 79.3, 80.1, 78.1, 76.7, 72.5, 65.7, 52.8],
'H3': [86.1, 87.7, 87.4, 87.8, 87.2, 86.5, 84.5, 84.4, 82.7, 78.8, 65.4],
'H4': [92.6, 93.4, 94.0, 93.4, 94.3, 94.0, 93.8, 93.7, 92.7, 89.6, 79.9],
'H5': [97.1, 97.5, 97.9, 98.2, 97.5, 98.2, 98.1, 98.2, 97.7, 97.4, 93.5],
}
smooth24_sma144_deriv1_matrice_df = pd.DataFrame(smooth24_sma144_deriv1_matrice, index=index_labels)
# Extraction de la matrice numérique
smooth24_sma144_deriv1_numeric_matrice = smooth24_sma144_deriv1_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values
# Bornes des quantiles pour
mid_smooth_deriv2_24_bins = [-10.2968, -0.2061, -0.0996, -0.0559, -0.0292, -0.0093, 0.0083, 0.0281, 0.0550, 0.0999, 0.2072, 10.2252]
# =========================================================================
# variables pour probabilité 144 bougies
mid_smooth_1h_bins = [-2.0622, -0.1618, -0.0717, -0.0353, -0.0135, 0.0, 0.0085, 0.0276, 0.0521, 0.0923, 0.1742, 2.3286]
sma24_deriv1_1h_bins = [-0.84253877, -0.13177195, -0.07485074, -0.04293497, -0.02033502, -0.00215711,
0.01411933, 0.03308264, 0.05661652, 0.09362708, 0.14898214, 0.50579505]
smooth_smadiff_matrice = {
"B5": [41.0, 41.2, 34.1, 27.5, 35.0, 30.6, 25.2, 29.8, 25.7, 30.6, 14.8],
"B4": [47.2, 35.8, 39.7, 27.9, 26.5, 19.9, 28.7, 20.8, 29.4, 27.5, 21.6],
"B3": [48.1, 48.4, 42.8, 32.3, 24.4, 23.6, 28.6, 23.9, 22.7, 25.1, 22.2],
"B2": [45.6, 46.5, 47.0, 33.2, 34.9, 30.8, 25.8, 30.4, 29.8, 22.6, 35.3],
"B1": [74.0, 59.9, 63.3, 61.9, 50.0, 41.9, 35.9, 34.4, 37.7, 30.8, 19.3],
"N0": [65.9, 60.2, 64.5, 67.1, 59.2, 59.2, 44.2, 37.5, 47.1, 34.1, 31.6],
"H1": [66.5, 75.8, 71.5, 70.8, 69.4, 67.5, 60.1, 52.7, 59.9, 50.9, 38.3],
"H2": [83.8, 79.4, 80.4, 79.5, 72.8, 70.6, 68.8, 66.1, 68.5, 59.8, 59.6],
"H3": [77.8, 84.6, 82.0, 81.3, 79.8, 74.0, 67.7, 69.8, 66.5, 57.0, 65.2],
"H4": [72.1, 83.0, 86.6, 73.6, 77.4, 63.0, 69.6, 67.5, 68.6, 68.6, 56.8],
"H5": [81.0, 78.5, 76.6, 81.9, 69.5, 75.0, 80.9, 62.9, 66.4, 63.7, 59.6]
}
smooth_smadiff_matrice_df = pd.DataFrame(smooth_smadiff_matrice, index=index_labels)
# Extraction de la matrice numérique
smooth_smadiff_numeric_matrice = smooth_smadiff_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values
# =========================================================================
# variables pour probabilité
smooth_pct_max_hour_matrice = {
'B5': [43.5, 52.7, 62.3, 65.5, 86.9, 63.1, 81.5, 86.7, 90.2, 90.1, 93.0],
'B4': [34.9, 46.3, 53.6, 60.4, 75.8, 83.3, 81.5, 83.0, 86.4, 86.9, 91.1],
'B3': [20.5, 35.4, 43.7, 54.5, 69.7, 71.6, 80.4, 84.7, 86.7, 84.9, 85.9],
'B2': [11.5, 25.4, 36.4, 47.9, 62.3, 65.7, 76.5, 82.0, 81.8, 82.8, 77.7],
'B1': [3.6, 14.9, 26.8, 41.1, 55.6, 71.4, 74.3, 79.8, 80.8, 82.3, 75.1],
'N0': [0.0, 6.9, 18.3, 32.0, 47.2, 62.1, 69.1, 74.8, 78.3, 76.6, 71.6],
'H1': [0.7, 3.8, 9.4, 24.2, 40.6, 59.7, 67.8, 70.9, 73.4, 72.1, 70.0],
'H2': [0.0, 0.6, 6.5, 13.6, 33.6, 51.7, 64.9, 70.2, 68.4, 67.8, 65.8],
'H3': [1.4, 0.6, 2.6, 6.6, 23.3, 50.2, 56.2, 63.6, 65.7, 64.5, 64.7],
'H4': [1.6, 0.3, 3.0, 3.2, 11.4, 32.7, 44.0, 54.9, 61.7, 60.6, 63.6],
'H5': [1.8, 2.6, 0.6, 1.1, 9.7, 12.9, 26.2, 44.5, 52.6, 54.5, 56.2],
# Bornes des quantiles pour
sma5_deriv1_1h = [-2.2582, -0.2665, -0.1475, -0.0860, -0.0428, -0.0084, 0.0244, 0.0592, 0.1038, 0.1656, 0.2766, 1.8331]
# Bornes des quantiles pour
mid_smooth_3_deriv1 = [-1.5837, -0.0765, -0.0451, -0.0280, -0.0155, -0.0046, 0.0058, 0.0167, 0.0298, 0.0472, 0.0785, 1.1362]
sma5_deriv1_1h_mid_smooth_3_deriv1_matrice = {
'B5': [6.1, 11.7, 15.6, 20.6, 24.0, 26.0, 30.9, 40.7, 51.4, 54.9, 76.2],
'B4': [10.4, 13.2, 19.6, 22.7, 31.9, 36.8, 44.5, 50.8, 68.0, 74.6, 88.2],
'B3': [10.2, 16.7, 24.4, 25.1, 32.2, 42.6, 53.7, 60.0, 74.3, 78.8, 88.2],
'B2': [11.5, 18.0, 24.8, 29.1, 35.6, 44.9, 54.1, 66.4, 75.5, 81.3, 90.0],
'B1': [10.2, 18.8, 26.0, 31.6, 39.3, 48.7, 60.7, 71.4, 78.5, 83.4, 90.5],
'N0': [12.5, 22.6, 26.4, 34.3, 42.2, 56.8, 63.3, 71.4, 80.7, 83.3, 89.5],
'H1': [14.4, 24.5, 28.7, 40.0, 49.2, 60.2, 68.0, 72.3, 82.2, 83.4, 92.5],
'H2': [12.7, 26.3, 33.5, 42.6, 53.6, 61.9, 68.8, 75.1, 80.9, 83.8, 92.0],
'H3': [13.2, 26.9, 40.9, 46.8, 56.9, 65.7, 72.5, 75.8, 84.4, 86.9, 93.2],
'H4': [15.8, 31.3, 43.6, 49.9, 64.2, 68.6, 75.9, 77.0, 85.4, 88.9, 95.0],
'H5': [18.8, 39.1, 54.7, 64.0, 70.1, 79.6, 77.7, 81.8, 89.9, 89.4, 96.4]
}
smooth_pct_max_hour_matrice_df = pd.DataFrame(smooth_pct_max_hour_matrice, index=index_labels)
sma5_deriv1_1h_mid_smooth_3_deriv1_matrice_df = pd.DataFrame(sma5_deriv1_1h_mid_smooth_3_deriv1_matrice, index=index_labels)
# Extraction de la matrice numérique
smooth_pct_max_hour_numeric_matrice = smooth_pct_max_hour_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values
# =========================================================================
# variables pour probabilité 144 bougies
# Données sous forme de dictionnaire
smooth_sma_24_diff_matrice = {
"B5":[40.3, 52.1, 60.2, 68.6, 86.3, 76.5, 75.1, 83.5, 88.7, 96.3, 91.6],
"B4":[26.6, 39.4, 48.1, 57.0, 76.7, 82.4, 79.6, 82.4, 91.8, 86.6, 87.8],
"B3":[21.5, 27.7, 42.7, 53.2, 70.9, 76.6, 80.8, 79.4, 88.3, 88.0, 87.8],
"B2":[15.1, 20.8, 32.9, 46.9, 59.1, 79.6, 82.5, 79.6, 80.8, 87.0, 85.5],
"B1":[15.7, 15.4, 21.9, 29.4, 48.3, 66.6, 76.4, 77.8, 80.8, 83.5, 81.4],
"N0":[15.0, 10.5, 20.1, 24.5, 36.9, 59.9, 68.8, 74.1, 77.7, 83.0, 75.7],
"H1":[14.8, 10.7, 15.1, 21.0, 30.1, 47.3, 59.2, 70.4, 76.1, 82.7, 82.6],
"H2":[7.9, 8.6, 13.6, 20.6, 27.0, 39.5, 55.2, 68.9, 69.0, 78.4, 83.4],
"H3":[9.2, 6.2, 12.6, 21.7, 23.6, 33.1, 42.3, 57.8, 66.0, 71.9, 81.9],
"H4":[4.8, 13.1, 16.3, 14.5, 19.5, 26.4, 35.6, 49.2, 63.2, 68.2, 71.6],
"H5":[17.9, 25.7, 20.8, 17.8, 8.7, 18.5, 32.3, 37.7, 49.3, 59.8, 61.7]
}
smooth_sma_24_diff_matrice_df = pd.DataFrame(smooth_smadiff_matrice, index=index_labels)
# Extraction de la matrice numérique
smooth_sma_24_diff_numeric_matrice = smooth_sma_24_diff_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values
# Bornes des quantiles pour
mid_smooth_1h_deriv1 = [-11.5091, -0.4887, -0.1902, -0.0823, -0.0281, -0.0008, 0.0110, 0.0439, 0.1066, 0.2349, 0.5440, 14.7943]
# Bornes des quantiles pour
mid_smooth_1h_deriv2 = [-6.2109, -0.2093, -0.0900, -0.0416, -0.0171, -0.0035, 0.0033, 0.0168, 0.0413, 0.0904, 0.2099, 6.2109]
# =========================================================================
# variables pour probabilité jour
# Bornes des quantiles pour
mid_smooth_1h_deriv1_1d_bins = [-11.5091, -0.4887, -0.1902, -0.0823, -0.0281, -0.0008, 0.0110, 0.0439, 0.1066, 0.2349, 0.5440, 14.7943]
# Bornes des quantiles pour
sma24_deriv1_1h_1d_bins = [-2.1101, -0.1413, -0.0768, -0.0433, -0.0196, -0.0028, 0.0120, 0.0304, 0.0560, 0.0933, 0.1568, 0.7793]
smooth_1d_sma_2_diff_1d_matrice = {
'B5': [42.5, 47.8, 52.7, 48.5, 54.2, 64.6, 70.8, 69.2, 72.3, 71.2, 79.9],
'B4': [34.1, 43.5, 45.7, 53.7, 52.6, 67.3, 63.9, 70.8, 73.5, 67.9, 82.9],
'B3': [33.7, 42.7, 45.8, 49.6, 49.0, 57.8, 64.7, 68.7, 70.7, 72.6, 87.1],
'B2': [30.0, 36.6, 40.5, 42.3, 51.2, 62.0, 64.4, 65.2, 69.8, 74.3, 84.9],
'B1': [21.4, 29.8, 33.6, 39.9, 49.4, 56.1, 59.9, 63.9, 71.0, 72.8, 79.6],
'N0': [19.8, 30.4, 34.5, 41.5, 42.2, 48.1, 61.7, 64.5, 73.7, 69.3, 79.4],
'H1': [22.7, 27.0, 36.9, 34.8, 46.3, 50.2, 58.9, 63.1, 65.8, 66.5, 80.0],
'H2': [23.1, 34.3, 32.2, 31.0, 38.8, 54.3, 53.6, 55.1, 60.3, 63.3, 77.4],
'H3': [17.0, 32.6, 37.4, 31.0, 35.1, 36.7, 45.2, 53.0, 55.4, 58.6, 71.8],
'H4': [22.7, 31.9, 28.0, 35.8, 36.3, 46.9, 53.9, 53.8, 58.8, 58.0, 67.6],
'H5': [18.8, 27.0, 32.1, 36.0, 41.9, 48.1, 49.8, 53.6, 57.2, 62.2, 65.2],
}
smooth_1d_sma_2_diff_1d_matrice_df = pd.DataFrame(smooth_smadiff_matrice, index=index_labels)
# Extraction de la matrice numérique
smooth_1d_sma_2_diff_1d_numeric_matrice = smooth_1d_sma_2_diff_1d_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values
sma5_deriv1_1h_mid_smooth_3_deriv1__numeric_matrice = sma5_deriv1_1h_mid_smooth_3_deriv1_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values
paliers = {}
@@ -371,6 +251,16 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# allow_to_buy = True #(not self.stop_all) #& (not self.all_down)
allow_to_buy = not self.pairs[pair]['stop'] #and val > self.buy_val.value #not last_candle['tendency'] in ('B-', 'B--') # (rate <= float(limit)) | (entry_tag == 'force_entry')
# if allow_to_buy:
# poly_func, x_future, y_future, count = self.polynomial_forecast(
# dataframe['mid_smooth_12'],
# window=self.buy_horizon_predict_1h.value * 12,
# degree=4,
# n_future=3)
#
# if count < 3:
# allow_to_buy = False
if allow_to_buy:
self.trades = list()
self.pairs[pair]['first_buy'] = rate
@@ -431,6 +321,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
dispo=dispo,
profit=round(trade.calc_profit(rate, amount), 2)
)
self.pairs[pair]['count_of_buys'] = 0
self.pairs[pair]['max_touch'] = 0
self.pairs[pair]['last_buy'] = 0
self.pairs[pair]['last_date'] = current_time
@@ -537,7 +428,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
if self.columns_logged % 30 == 0:
self.printLog(
f"| {'Date':<16} | {'Action':<10} |{'Pair':<5}| {'Trade Type':<18} |{'Rate':>8} | {'Dispo':>6} | {'Profit':>8} | {'Pct':>6} | {'max_touch':>11} | {'last_lost':>12} | {'last_max':>7}|{'Buys':>4}| {'Stake':>5} |"
f"Tdc|Tdh|Tdd| drv1 |drv_1h|drv_1d| drv2 |drv_2h|drv_2d|val144|val1h |"
f"Tdc|{'val':>6}| sma5 |smooth |"
)
self.printLineLog()
@@ -581,16 +472,17 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# val144 = self.getProbaHausse144(last_candle)
# val1h = self.getProbaHausse1h(last_candle)
val = self.getProbaHausse(last_candle)
self.printLog(
f"| {date:<16} | {action:<10} | {pair[0:3]:<3} | {trade_type or '-':<18} |{rate or '-':>9}| {dispo or '-':>6} "
f"| {profit or '-':>8} | {pct_max or '-':>6} | {round(self.pairs[pair]['max_touch'], 2) or '-':>11} | {last_lost or '-':>12} "
f"| {int(self.pairs[pair]['last_max']) or '-':>7} |{buys or '-':>2}-{self.pairs[pair]['last_palier_index'] or '-':>2}|{stake or '-':>7}"
f"| {int(self.pairs[pair]['last_max']) or '-':>7} |{buys or '-':>4}|{stake or '-':>7}"
f"|{last_candle['tendency_12'] or '-':>3}|" #{last_candle['tendency_1h'] or '-':>3}|{last_candle['tendency_1d'] or '-':>3}"
# f"|{round(last_candle['mid_smooth_24_deriv1'],3) or '-':>6}|{round(last_candle['mid_smooth_1h_deriv1'],3) or '-':>6}|{round(last_candle['mid_smooth_deriv1_1d'],3) or '-' :>6}|"
# f"{round(last_candle['mid_smooth_24_deriv2'],3) or '-' :>6}|{round(last_candle['mid_smooth_1h_deriv2'],3) or '-':>6}|{round(last_candle['mid_smooth_deriv2_1d'],3) or '-':>6}|"
# f"{round(val144, 1) or '-' :>6}|{round(val1h, 1) or '-':>6}|"
f"{round(last_candle['sma20_deriv1'], 4) or '-' :>6}|{round(last_candle['sma5_deriv1_1d'], 4) or '-' :>6}"
f"{round(val, 1) or '-' :>6}|"
f"{round(last_candle['sma5_deriv1_1h'], 4) or '-' :>7}|{round(last_candle['mid_smooth_3_deriv1'], 4) or '-' :>7}|"
)
def printLineLog(self):
@@ -599,7 +491,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
f"+{'-' * 18}+{'-' * 12}+{'-' * 5}+{'-' * 20}+{'-' * 9}+{'-' * 8}+{'-' * 10}+{'-' * 8}+{'-' * 13}+{'-' * 14}+{'-' * 9}+{'-' * 4}+{'-' * 7}+"
f"{'-' * 3}"
#"+{'-' * 3}+{'-' * 3}
# f"+{'-' * 6}+{'-' * 6}+{'-' * 6}+{'-' * 6}+{'-' * 6}+{'-' * 6}+"
f"+{'-' * 6}+{'-' * 7}+{'-' * 7}+"
)
def printLog(self, str):
@@ -637,16 +529,6 @@ class Zeus_8_3_2_B_4_2(IStrategy):
dataframe['haclose'] = heikinashi['close']
dataframe['hapercent'] = (dataframe['haclose'] - dataframe['haopen']) / dataframe['haclose']
dataframe['min'] = talib.MIN(dataframe['close'], timeperiod=200)
dataframe['min12'] = talib.MIN(dataframe['close'], timeperiod=12)
dataframe['min50'] = talib.MIN(dataframe['close'], timeperiod=50)
dataframe['min200'] = talib.MIN(dataframe['close'], timeperiod=200)
dataframe['max200'] = talib.MAX(dataframe['close'], timeperiod=200)
dataframe['max50'] = talib.MAX(dataframe['close'], timeperiod=50)
dataframe['max200_diff'] = (dataframe['max200'] - dataframe['close']) / dataframe['close']
dataframe['sma5'] = talib.SMA(dataframe, timeperiod=5)
dataframe['sma10'] = talib.SMA(dataframe, timeperiod=10)
self.calculeDerivees(dataframe, 'sma10')
@@ -703,14 +585,19 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# informative['volatility'] = talib.STDDEV(informative['close'], timeperiod=14) / informative['close']
# informative['atr'] = (talib.ATR(informative['high'], informative['low'], informative['close'], timeperiod=14)) / informative['close']
informative['rsi'] = talib.RSI(informative['close']) #, timeperiod=7)
informative['max12'] = talib.MAX(informative['close'], timeperiod=12)
informative['min12'] = talib.MIN(informative['close'], timeperiod=12)
informative['sma5'] = talib.SMA(informative, timeperiod=5)
informative['sma24'] = talib.SMA(informative, timeperiod=24)
self.calculeDerivees(informative, 'sma5')
self.calculeDerivees(informative, 'sma24')
# self.calculateDownAndUp(informative, limit=0.0012)
informative['futur_percent_3'] = 100 * ((informative['sma5'].shift(-3) - informative['sma5']) / informative['sma5'])
print("##################")
print("# STAT HOUR")
print("##################")
self.calculateStats(informative, 'sma5_deriv1', 'futur_percent_3')
dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "1h", ffill=True)
################### INFORMATIVE 1d
@@ -721,16 +608,21 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# informative = self.apply_regression_derivatives(informative, column='mid', window=5, degree=4)
informative['max12'] = talib.MAX(informative['close'], timeperiod=12)
informative['min12'] = talib.MIN(informative['close'], timeperiod=12)
informative['max3'] = talib.MAX(informative['close'], timeperiod=3)
informative['min3'] = talib.MIN(informative['close'], timeperiod=3)
# informative['rsi'] = talib.RSI(informative['close']) #, timeperiod=7)
# self.calculeDerivees(informative, 'rsi')
#
informative['sma5'] = talib.SMA(informative, timeperiod=5)
self.calculeDerivees(informative, 'sma5', factor_1=10, factor_2=1)
informative['futur_percent_3'] = 100 * ((informative['sma5'].shift(-3) - informative['sma5']) / informative['sma5'])
print("##################")
print("# STAT DAY")
print("##################")
self.calculateStats(informative, 'sma5_deriv1', 'futur_percent_3')
# informative['close_smooth'] = self.conditional_smoothing(informative['mid'].dropna(), threshold=0.0015).dropna()
# informative['smooth'], informative['deriv1'], informative['deriv2'] = self.smooth_and_derivatives(informative['close_smooth'])
# informative['deriv1'] = 100 * informative['deriv1'] / informative['mid']
@@ -784,8 +676,8 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# dataframe['deriv1_1h'] = 100 * dataframe['deriv1_1h'] / dataframe['mid_smooth_1h']
# dataframe['deriv2_1h'] = 1000 * dataframe['deriv2_1h'] / dataframe['mid_smooth_1h']
# dataframe['sma5_1h'] = dataframe['sma5_1h'].rolling(window=horizon_h).mean()
horizon_h = 12
dataframe['sma5_1h'] = dataframe['sma5_1h'].rolling(window=horizon_h).mean()
# dataframe['sma5_deriv1_1h'] = dataframe['sma5_deriv1_1h'].rolling(window=horizon_h).mean()
# dataframe['sma24_1h'] = dataframe['sma24_1h'].rolling(window=horizon_h).mean()
# dataframe['sma24_deriv1_1h'] = dataframe['sma24_deriv1_1h'].rolling(window=horizon_h).mean()
@@ -806,12 +698,12 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# ===============================
# Lissage des valeurs Journalières
# horizon_d = 24 * 5
horizon_d = 12 * 5 * 24
# dataframe['mid_smooth_1d'] = dataframe['mid_smooth_1d'].rolling(window=horizon_d * 5).mean()
# dataframe["mid_smooth_deriv1_1d"] = dataframe["mid_smooth_1d"].rolling(horizon_d).mean().diff() / horizon_d
# dataframe["mid_smooth_deriv2_1d"] = horizon_d * dataframe["mid_smooth_deriv1_1d"].rolling(horizon_d).mean().diff()
#
# dataframe['sma5_1d'] = dataframe['sma5_1d'].rolling(window=horizon_d * 5).mean()
dataframe['sma5_1d'] = dataframe['sma5_1d'].rolling(window=horizon_d).mean()
# dataframe['sma5_deriv1_1d'] = dataframe['sma5_deriv1_1d'].rolling(window=horizon_d).mean()
# dataframe['sma24_1d'] = dataframe['sma24_1d'].rolling(window=horizon_d).mean()
# dataframe['sma24_deriv1_1d'] = dataframe['sma24_deriv1_1d'].rolling(window=horizon_d).mean()
@@ -831,6 +723,11 @@ class Zeus_8_3_2_B_4_2(IStrategy):
#
# self.calculateProbabilite2Index(dataframe, ['futur_percent_1d'], 'sma24_deriv1_1h', 'sma5_1d')
print("##################")
print("# STAT DAY vs HOUR")
print("##################")
self.calculateProbabilite2Index(dataframe, futur_cols=['futur_percent_3_1h'], indic_1='sma5_deriv1_1h', indic_2='mid_smooth_3_deriv1')
return dataframe
def calculeDerivees(self, dataframe, indic, factor_1=100, factor_2=10):
@@ -1118,7 +1015,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# self.get_active_stake()
# val144 = self.getProbaHausse144(last_candle)
# val1h = self.getProbaHausse1h(last_candle)
# val = self.getProbaHausse144(last_candle)
# val = self.getProbaHausse(last_candle)
# buy = False
# previous = 0
@@ -1155,8 +1052,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
poly_func, x_future, y_future, count = self.polynomial_forecast(
dataframe['mid_smooth_12'],
window=self.buy_horizon_predict_1h.value * 12,
degree=4,
n_future=3)
degree=4)
if count < 3:
return None
@@ -1166,6 +1062,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
stake_amount = min(min(max_amount, self.wallets.get_available_stake_amount()),
self.adjust_stake_amount(pair, last_candle) - 10 * pct_first / pct) # min(200, self.adjust_stake_amount(pair, last_candle) * self.fibo[count_of_buys])
self.pairs[trade.pair]['count_of_buys'] = self.pairs[trade.pair]['count_of_buys'] + 1
trade_type = last_candle['enter_tag'] if last_candle['enter_long'] == 1 else 'pct48'
self.log_trade(
last_candle=last_candle,
@@ -1187,6 +1084,23 @@ class Zeus_8_3_2_B_4_2(IStrategy):
except Exception as exception:
print(exception)
return None
# if (count_of_buys >= 6):
# self.log_trade(
# last_candle=last_candle,
# date=current_time,
# action="Sell",
# dispo=dispo,
# pair=trade.pair,
# rate=current_rate,
# trade_type="Stop loss",
# profit=round(current_profit, 4), # round(current_profit * trade.stake_amount, 2),
# buys=trade.nr_of_successful_entries + 1,
# stake=-trade.stake_amount
# )
# self.pairs[trade.pair]['last_buy'] = current_rate
# self.pairs[trade.pair]['max_touch'] = last_candle['close']
# self.pairs[trade.pair]['last_candle'] = last_candle
# return -trade.stake_amount
# if (count_of_buys < limit_buy and pct_max > pct and current_profit > 0.004) \
# and (last_candle['rsi_deriv1_1h'] >= -5) \
@@ -1216,33 +1130,17 @@ class Zeus_8_3_2_B_4_2(IStrategy):
return None
# def getProbaHausse144(self, last_candle):
# value_1 = self.getValuesFromTable(self.mid_smooth_24_deriv1_bins, last_candle['mid_smooth_24_deriv1'])
# value_2 = self.getValuesFromTable(self.sma144_deriv1_bins, last_candle['sma144_deriv1'])
#
# val = self.approx_val_from_bins(
# matrice=self.smooth24_sma144_deriv1_matrice_df,
# numeric_matrice=self.smooth24_sma144_deriv1_numeric_matrice,
# row_label=value_2,
# col_label=value_1)
# return val
#
# def getProbaHausse1h(self, last_candle):
# value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_1h_deriv1'])
# value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma24_deriv1_1h'])
#
# val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice,
# row_label=value_2,
# col_label=value_1)
# return val
#
# def getProbaHausse1d(self, last_candle):
# value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_deriv1_1d'])
# value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma5_deriv1_1d'])
#
# val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice, row_label=value_2,
# col_label=value_1)
# return val
def getProbaHausse(self, last_candle):
    """Return the estimated probability of a rise for the given candle.

    Maps the candle's 'sma5_deriv1_1h' and 'mid_smooth_3_deriv1' values onto
    their precomputed quantile bins, then interpolates the probability from
    the cross-tabulated statistics matrix.
    """
    row_label = self.getValuesFromTable(self.sma5_deriv1_1h, last_candle['sma5_deriv1_1h'])
    col_label = self.getValuesFromTable(self.mid_smooth_3_deriv1, last_candle['mid_smooth_3_deriv1'])
    return self.approx_val_from_bins(
        matrice=self.sma5_deriv1_1h_mid_smooth_3_deriv1_matrice_df,
        numeric_matrice=self.sma5_deriv1_1h_mid_smooth_3_deriv1__numeric_matrice,
        row_label=row_label,
        col_label=col_label,
    )
def adjust_stake_amount(self, pair: str, last_candle: DataFrame):
# Calculer le minimum des 14 derniers jours
@@ -1688,7 +1586,7 @@ class Zeus_8_3_2_B_4_2(IStrategy):
#
# return result
def polynomial_forecast(self, series: pd.Series, window: int = 20, degree: int = 2, n_future: int = 3):
def polynomial_forecast(self, series: pd.Series, window: int = 20, degree: int = 2, steps = [12, 24, 36]):
"""
Calcule une régression polynomiale sur les `window` dernières valeurs de la série,
puis prédit les `n_future` prochaines valeurs.
@@ -1708,22 +1606,73 @@ class Zeus_8_3_2_B_4_2(IStrategy):
coeffs = np.polyfit(x, recent_y, degree)
poly = np.poly1d(coeffs)
x_future = np.arange(window, window + n_future)
x_future = np.arange(window, window + len(steps))
y_future = poly(x_future)
# Affichage de la fonction
# print("Fonction polynomiale trouvée :")
# print(poly_func)
# print(poly)
current = series.iloc[-1]
count = 0
for future_step in [12, 24, 36]: #range(1, n_future + 1)
for future_step in steps: #range(1, n_future + 1)
future_x = window - 1 + future_step
prediction = poly(future_x)
# series.loc[series.index[future_x], f'poly_pred_t+{future_step}'] = prediction
# Afficher les prédictions
# print(f" → t+{future_step}: x={future_x}, y={prediction:.2f}")
if prediction > 0:
# print(f"{current} → t+{future_step}: x={future_x}, y={prediction:.2f}")
if prediction > current:
count += 1
return poly, x_future, y_future, count
def calculateStats2(self, df, index, target):
    """Bin *index* and *target* into quantiles and print their cross-tabulation.

    Adds '<index>_bin' and '<target>_bin' categorical columns to *df*
    (side effect), prints the quantile edges formatted as pasteable Python
    lists, then the raw contingency table and the row-wise percentage table.
    """
    # Number of quantile slices per axis (tweak as needed).
    quantiles = 11

    df[f"{index}_bin"], edges_index = pd.qcut(
        df[f"{index}"], q=quantiles, labels=self.labels, retbins=True,
        duplicates='drop')
    df[f"{target}_bin"], edges_target = pd.qcut(
        df[f"{target}"], q=quantiles, labels=self.labels, retbins=True,
        duplicates='drop')

    # Quantile edges, formatted so they can be copied back into the source.
    print(f"Bornes des quantiles pour {index} : [{', '.join([f'{b:.4f}' for b in edges_index])}]")
    print(f"Bornes des quantiles pour {target} : [{', '.join([f'{b:.4f}' for b in edges_target])}]")

    # Raw counts, then percentages normalised per row.
    counts = pd.crosstab(df[f"{index}_bin"], df[f"{target}_bin"])
    percentages = counts.div(counts.sum(axis=1), axis=0) * 100

    print("Répartition brute :")
    print(counts)
    print("\nRépartition en % par ligne :")
    print(percentages.round(2))
def calculateStats(self, df, index, target):
    """Print the mean of *target* per (index-quantile, target-quantile) cell.

    Adds 'indice_tranche' and 'valeur_tranche' quantile columns to *df*
    (side effect — note these column names are shared across calls), then
    prints a pivot table of mean *target* values, rounded to 2 decimals.
    """
    # Number of quantile slices (tweakable)
    n_bins_indice = 11
    n_bins_valeur = 11
    # Build the dynamic quantile bins; duplicates='drop' tolerates repeated edges
    df['indice_tranche'] = pd.qcut(df[index], q=n_bins_indice, duplicates='drop')
    df['valeur_tranche'] = pd.qcut(df[target], q=n_bins_valeur, duplicates='drop')
    # Cross-tabulate with the mean of the aggregated values
    pivot_mean = df.pivot_table(
        index='indice_tranche',
        columns='valeur_tranche',
        values=target,  # <-- the column being aggregated
        aggfunc='mean'  # <-- compute the mean
    )
    # Result (rounded for readability)
    print("Moyenne des valeurs par double-tranche :")
    print(pivot_mean.round(2))