# Zeus Strategy: First Generation of GodStra Strategy with maximum # AVG/MID profit in USDT # Author: @Mablue (Masoud Azizi) # github: https://github.com/mablue/ # IMPORTANT: INSTALL TA BEFOUR RUN(pip install ta) # freqtrade hyperopt --hyperopt-loss SharpeHyperOptLoss --spaces buy sell roi --strategy Zeus # --- Do not remove these libs --- from datetime import timedelta, datetime from freqtrade.persistence import Trade from freqtrade.strategy import (BooleanParameter, CategoricalParameter, DecimalParameter, stoploss_from_open, IntParameter, IStrategy, merge_informative_pair, informative, stoploss_from_absolute) import pandas as pd import numpy as np from pandas import DataFrame from typing import Optional, Union, Tuple import logging import configparser from technical import pivots_points # -------------------------------- # Add your lib to import here test git import ta import talib.abstract as talib import freqtrade.vendor.qtpylib.indicators as qtpylib import requests from datetime import timezone, timedelta from scipy.signal import savgol_filter from ta.trend import SMAIndicator, EMAIndicator, MACD, ADXIndicator from collections import Counter logger = logging.getLogger(__name__) from tabulate import tabulate def pprint_df(dframe): print(tabulate(dframe, headers='keys', tablefmt='psql', showindex=False)) def normalize(df): df = (df - df.min()) / (df.max() - df.min()) return df class Zeus_8_3_2_B_4_2(IStrategy): levels = [1, 2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20] startup_candle_count = 12 * 24 * 2 # ROI table: minimal_roi = { "0": 0.564, "567": 0.273, "2814": 0.12, "7675": 0 } # Stoploss: stoploss = -1 # 0.256 # Custom stoploss use_custom_stoploss = True # Buy hypers timeframe = '5m' max_open_trades = 5 max_amount = 40 # DCA config position_adjustment_enable = True plot_config = { "main_plot": { "sma5_1h": { "color": "white" }, "sma5_1d": { "color": "blue" }, "sma20": { "color": "yellow" }, "bb_lowerband": { "color": "#da59a6"}, "bb_upperband": { "color": "#da59a6", }, "sma10": { "color": "blue" } }, "subplots": { "Pct": { "sma20_deriv1": { 'color': "green" }, "down_pct": { "color": "blue" }, "down_pct_1h": { "color": "red" }, "down_pct_1d": { "color": "red" } }, "Rsi": { "rsi": { "color": "pink" }, "rsi_1h": { "color": "red" }, "rsi_1d": { "color": "blue" } }, "Rsi_deriv": { "rsi_deriv1_1h": { "color": "red" }, "rsi_deriv1_1d": { "color": "blue" }, }, "Down": { "down_count_1h": { "color": "green" }, "up_count_1h": { "color": "blue" } }, # "Diff": { # "sma10_deriv1": { # "color": "#74effc" # } # }, "smooth": { 'mid_smooth_deriv1_1d': { "color": "blue" }, 'mid_smooth_1h_deriv1': { "color": "red" }, 'mid_smooth_deriv2_1d': { "color": "pink" }, 'mid_smooth_1h_deriv2': { "color": "#da59a6" } } } } columns_logged = False pairs = { pair: { "first_buy": 0, "last_max": 0, "trade_info": {}, "max_touch": 0.0, "last_min": 0.0, "last_sell": 0.0, "last_buy": 0.0, 'count_of_buys': 0, 'current_profit': 0, 'expected_profit': 0, "last_candle": {}, "last_trade": None, "last_count_of_buys": 0, 'base_stake_amount': 0, 'stop_buy': False, 'last_date': 0, 'stop': False, 'max_profit': 0, 'last_palier_index': -1 } for pair in ["BTC/USDC", "ETH/USDC", "DOGE/USDC", "XRP/USDC", "SOL/USDC", "BTC/USDT", "ETH/USDT", "DOGE/USDT", "XRP/USDT", "SOL/USDT"] } # 20 20 40 60 100 160 260 420 # 50 50 100 300 500 # fibo = [1, 1, 2, 3, 5, 8, 13, 21] # my fibo # 50 50 50 100 100 150 200 250 350 450 600 1050 fibo = [1, 1, 1, 2, 2, 3, 4, 5, 7, 9, 12, 16, 21] baisse = [1, 2, 3, 5, 7, 10, 14, 19, 26, 35, 47, 63, 84] # Ma suite 1 1 1 2 2 3 4 5 7 9 12 16 21 # Mise 50 50 50 100 100 150 200 250 350 450 600 800 1050 # Somme Mises 50 100 150 250 350 500 700 950 1300 1750 2350 3150 4200 # baisse 1 2 3 5 7 10 14 19 26 35 47 63 84 factors = [1, 1.1, 1.25, 1.5, 2.0, 3] thresholds = [2, 5, 10, 20, 30, 50] trades = list() max_profit_pairs = {} protection_percent_buy_lost = IntParameter(1, 10, default=5, space='protection') protection_fibo = IntParameter(1, 10, default=2, space='protection') sell_allow_decrease = DecimalParameter(0.005, 0.02, default=0.2, decimals=2, space='sell', optimize=True, load=True) labels = ['B5', 'B4', 'B3', 'B2', 'B1', 'N0', 'H1', 'H2', 'H3', 'H4', 'H5'] index_labels = ['B5', 'B4', 'B3', 'B2', 'B1', 'N0', 'H1', 'H2', 'H3', 'H4', 'H5'] # Récupération des labels ordonnés ordered_labels = ['B5', 'B4', 'B3', 'B2', 'B1', 'N0', 'H1', 'H2', 'H3', 'H4', 'H5'] label_to_index = {label: i for i, label in enumerate(ordered_labels)} # ========================================================================= # variables pour probabilité # Bornes des quantiles pour ema_volume = [-8.9178, -0.0196, -0.0096, -0.0053, -0.0026, -0.0007, 0.0009, 0.0029, 0.0056, 0.0101, 0.0200, 3.8009] # Bornes des quantiles pour mid_smooth_1h_deriv1 = [-1.0482, -0.0571, -0.0336, -0.0206, -0.0113, -0.0033, 0.0044, 0.0127, 0.0225, 0.0356, 0.0591, 0.8335] ema_volume_mid_smooth_1h_deriv1_matrice = { 'B5': [28.0, 32.8, 33.6, 36.4, 35.5, 35.6, 40.1, 40.9, 45.9, 49.7, 52.2], 'B4': [33.9, 37.2, 38.6, 40.7, 39.7, 43.0, 46.2, 47.1, 51.9, 55.9, 61.1], 'B3': [36.4, 41.3, 39.1, 41.8, 44.6, 46.1, 50.3, 47.9, 47.6, 57.0, 58.5], 'B2': [40.7, 40.6, 40.9, 44.6, 48.0, 48.4, 48.5, 53.5, 53.0, 54.8, 53.3], 'B1': [37.5, 41.4, 48.0, 46.3, 48.5, 49.1, 53.7, 53.4, 56.4, 56.7, 62.8], 'N0': [47.0, 44.3, 45.6, 47.0, 52.9, 52.2, 55.7, 53.0, 57.6, 58.1, 63.4], 'H1': [44.1, 46.2, 49.4, 49.3, 52.2, 53.7, 58.2, 57.1, 59.0, 61.6, 61.3], 'H2': [51.0, 44.7, 49.4, 51.3, 54.9, 57.9, 56.7, 58.1, 60.3, 60.6, 65.6], 'H3': [50.5, 48.3, 49.9, 60.4, 57.8, 56.3, 60.2, 61.9, 62.2, 65.3, 68.3], 'H4': [43.1, 53.6, 58.1, 61.4, 58.7, 62.6, 61.3, 65.4, 67.5, 68.2, 71.4], 'H5': [56.6, 56.2, 57.7, 63.8, 64.8, 64.7, 66.5, 68.8, 70.9, 72.8, 76.6], } ema_volume_mid_smooth_1h_deriv1_matrice_df = pd.DataFrame(ema_volume_mid_smooth_1h_deriv1_matrice, index=index_labels) # Extraction de la matrice numérique ema_volume_mid_smooth_1h_deriv1_numeric_matrice = ema_volume_mid_smooth_1h_deriv1_matrice_df.reindex(index=ordered_labels, columns=ordered_labels).values # paliers = {} # ========================================================================= # Parameters hyperopt # buy_mid_smooth_3_deriv1 = DecimalParameter(-0.1, 0.1, decimals=2, default=-0.06, space='buy') # buy_mid_smooth_24_deriv1 = DecimalParameter(-0.6, 0, decimals=2, default=-0.03, space='buy') buy_horizon_predict_1h = IntParameter(1, 6, default=2, space='buy') # buy_level_predict_1h = IntParameter(2, 5, default=4, space='buy') should_enter_trade_count = 0 def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float, time_in_force: str, current_time: datetime, entry_tag: Optional[str], **kwargs) -> bool: minutes = 0 if self.pairs[pair]['last_date'] != 0: minutes = round(int((current_time - self.pairs[pair]['last_date']).total_seconds() / 60)) dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe) last_candle = dataframe.iloc[-1].squeeze() last_candle_2 = dataframe.iloc[-2].squeeze() last_candle_3 = dataframe.iloc[-3].squeeze() # val = self.getProbaHausse144(last_candle) # allow_to_buy = True #(not self.stop_all) #& (not self.all_down) allow_to_buy = not self.pairs[pair]['stop'] # and val > self.buy_val.value #not last_candle['tendency'] in ('B-', 'B--') # (rate <= float(limit)) | (entry_tag == 'force_entry') # if allow_to_buy: # poly_func, x_future, y_future, count = self.polynomial_forecast( # dataframe['mid_smooth_12'], # window=self.buy_horizon_predict_1h.value * 12, # degree=4, # n_future=3) # # if count < 3: # allow_to_buy = False if not self.should_enter_trade(pair, last_candle): allow_to_buy = False if allow_to_buy: self.trades = list() self.pairs[pair]['first_buy'] = rate self.pairs[pair]['last_buy'] = rate self.pairs[pair]['max_touch'] = last_candle['close'] self.pairs[pair]['last_candle'] = last_candle self.pairs[pair]['count_of_buys'] = 1 self.pairs[pair]['current_profit'] = 0 self.pairs[pair]['last_palier_index'] = -1 dispo = round(self.wallets.get_available_stake_amount()) self.printLineLog() stake_amount = self.adjust_stake_amount(pair, last_candle) self.pairs[pair]['total_amount'] = stake_amount self.log_trade( last_candle=last_candle, date=current_time, action=("Buy" if allow_to_buy else "Canceled") + " " + str(minutes), pair=pair, rate=rate, dispo=dispo, profit=0, trade_type=entry_tag, buys=1, stake=round(stake_amount, 2) ) return allow_to_buy def confirm_trade_exit(self, pair: str, trade: Trade, order_type: str, amount: float, rate: float, time_in_force: str, exit_reason: str, current_time, **kwargs, ) -> bool: # allow_to_sell = (minutes > 30) dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe) last_candle = dataframe.iloc[-1].squeeze() allow_to_sell = (last_candle['percent'] < 0) minutes = int(round((current_time - trade.date_last_filled_utc).total_seconds() / 60, 0)) if allow_to_sell: self.trades = list() self.pairs[pair]['last_count_of_buys'] = trade.nr_of_successful_entries # self.pairs[pair]['count_of_buys'] self.pairs[pair]['last_sell'] = rate self.pairs[pair]['last_trade'] = trade self.pairs[pair]['last_candle'] = last_candle self.pairs[pair]['max_profit'] = 0 self.trades = list() dispo = round(self.wallets.get_available_stake_amount()) # print(f"Sell {pair} {current_time} {exit_reason} dispo={dispo} amount={amount} rate={rate} open_rate={trade.open_rate}") self.log_trade( last_candle=last_candle, date=current_time, action="Sell " + str(minutes), pair=pair, trade_type=exit_reason, rate=last_candle['close'], dispo=dispo, profit=round(trade.calc_profit(rate, amount), 2) ) self.pairs[pair]['total_amount'] = 0 self.pairs[pair]['count_of_buys'] = 0 self.pairs[pair]['max_touch'] = 0 self.pairs[pair]['last_buy'] = 0 self.pairs[pair]['last_date'] = current_time self.pairs[pair]['last_palier_index'] = -1 return (allow_to_sell) | (exit_reason == 'force_exit') def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float, proposed_stake: float, min_stake: float, max_stake: float, **kwargs) -> float: dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe) current_candle = dataframe.iloc[-1].squeeze() adjusted_stake_amount = self.adjust_stake_amount(pair, current_candle) # print(f"{pair} adjusted_stake_amount{adjusted_stake_amount}") # Use default stake amount. return adjusted_stake_amount def custom_exit(self, pair: str, trade: Trade, current_time, current_rate, current_profit, **kwargs): dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe) last_candle = dataframe.iloc[-1].squeeze() last_candle_1h = dataframe.iloc[-13].squeeze() before_last_candle = dataframe.iloc[-2].squeeze() before_last_candle_2 = dataframe.iloc[-3].squeeze() before_last_candle_12 = dataframe.iloc[-13].squeeze() before_last_candle_24 = dataframe.iloc[-25].squeeze() expected_profit = self.expectedProfit(pair, last_candle) # print(f"current_time={current_time} current_profit={current_profit} expected_profit={expected_profit}") max_touch_before = self.pairs[pair]['max_touch'] self.pairs[pair]['last_max'] = max(last_candle['haclose'], self.pairs[pair]['last_max']) self.pairs[pair]['last_min'] = min(last_candle['haclose'], self.pairs[pair]['last_min']) count_of_buys = trade.nr_of_successful_entries # baisse = self.pairs[pair]['max_profit'] - current_profit # mx = self.pairs[pair]['max_profit'] / 5 self.pairs[pair]['count_of_buys'] = count_of_buys self.pairs[pair]['current_profit'] = current_profit self.pairs[pair]['max_profit'] = max(self.pairs[pair]['max_profit'], current_profit) # if (last_candle['mid_smooth_deriv1'] >= 0): # return None # if (last_candle['tendency'] in ('H++', 'H+')) and (last_candle['rsi'] < 80): # return None # # if (last_candle['sma20_deriv1'] < 0 and before_last_candle['sma20_deriv1'] >= 0) and (current_profit > expected_profit): # return 'Drv_' + str(count_of_buys) pair_name = self.getShortName(pair) if 1 <= count_of_buys <= 3: if ((before_last_candle_2['mid_smooth_3_deriv1'] <= before_last_candle['mid_smooth_3_deriv1']) & (before_last_candle['mid_smooth_3_deriv1'] >= last_candle['mid_smooth_3_deriv1'])) \ and (current_profit > expected_profit): return 'Drv3_' + pair_name + '_' + str(count_of_buys) if 4 <= count_of_buys <= 6: if ((before_last_candle_2['mid_smooth_12_deriv1'] <= before_last_candle['mid_smooth_12_deriv1']) & (before_last_candle['mid_smooth_12_deriv1'] >= last_candle['mid_smooth_12_deriv1'])) \ and (current_profit > expected_profit): return 'Drv13_' + pair_name + '_' + str(count_of_buys) if 7 <= count_of_buys: if ((before_last_candle_24['sma24_deriv1_1h'] <= before_last_candle_12['sma24_deriv1_1h']) & (before_last_candle_12['sma24_deriv1_1h'] >= last_candle['sma24_deriv1_1h'])) \ and (current_profit > expected_profit): return 'Drv24_' + pair_name + '_' + str(count_of_buys) # if (baisse > mx) & (current_profit > expected_profit): # self.trades = list() # return 'mx_' + str(count_of_buys) # if (last_candle['percent12'] <= -0.01) & (current_profit >= expected_profit): # self.trades = list() # return 'pft_' + str(count_of_buys) self.pairs[pair]['max_touch'] = max(last_candle['haclose'], self.pairs[pair]['max_touch']) def getShortName(self, pair): return pair.replace("/USDT", '').replace("/USDC", '') def informative_pairs(self): # get access to all pairs available in whitelist. pairs = self.dp.current_whitelist() informative_pairs = [(pair, '1d') for pair in pairs] informative_pairs += [(pair, '1h') for pair in pairs] return informative_pairs from typing import List def multi_step_interpolate(self, pct: float, thresholds: List[float], factors: List[float]) -> float: if pct <= thresholds[0]: return factors[0] if pct >= thresholds[-1]: return factors[-1] for i in range(1, len(thresholds)): if pct <= thresholds[i]: # interpolation linéaire entre thresholds[i-1] et thresholds[i] return factors[i - 1] + (pct - thresholds[i - 1]) * (factors[i] - factors[i - 1]) / ( thresholds[i] - thresholds[i - 1]) # Juste au cas où (devrait jamais arriver) return factors[-1] # def interpolate_factor(self, pct: float, start_pct: float = 5, end_pct: float = 30, # start_factor: float = 1.0, end_factor: float = 2.0) -> float: # if pct <= start_pct: # return start_factor # if pct >= end_pct: # return end_factor # # interpolation linéaire # return start_factor + (pct - start_pct) * (end_factor - start_factor) / (end_pct - start_pct) def log_trade(self, action, pair, date, trade_type=None, rate=None, dispo=None, profit=None, buys=None, stake=None, last_candle=None): # Afficher les colonnes une seule fois if self.config.get('runmode') == 'hyperopt': return if self.columns_logged % 30 == 0: self.printLog( f"| {'Date':<16} | {'Action':<10} |{'Pair':<5}| {'Trade Type':<18} |{'Rate':>8} | {'Dispo':>6} | {'Profit':>8} | {'Pct':>6} | {'max_touch':>11} | {'last_lost':>12} | {'last_max':>7}|{'Buys':>4}| {'Stake':>5} |" f"Tdc|{'val':>6}| smooth|smoodrv|Distmax|" ) self.printLineLog() df = pd.DataFrame.from_dict(self.pairs, orient='index') colonnes_a_exclure = ['last_candle', 'last_trade', 'last_palier_index', 'stop', 'trade_info', 'last_date', 'expected_profit', 'last_count_of_buys', 'base_stake_amount', 'stop_buy'] df_filtered = df[df['count_of_buys'] > 0].drop(columns=colonnes_a_exclure) # df_filtered = df_filtered["first_buy", "last_max", "max_touch", "last_sell","last_buy", 'count_of_buys', 'current_profit'] print(df_filtered) self.columns_logged += 1 date = str(date)[:16] if date else "-" limit = None # if buys is not None: # limit = round(last_rate * (1 - self.fibo[buys] / 100), 4) rsi = '' rsi_pct = '' # if last_candle is not None: # if (not np.isnan(last_candle['rsi_1d'])) and (not np.isnan(last_candle['rsi_1h'])): # rsi = str(int(last_candle['rsi_1d'])) + " " + str(int(last_candle['rsi_1h'])) # if (not np.isnan(last_candle['rsi_pct_1d'])) and (not np.isnan(last_candle['rsi_pct_1h'])): # rsi_pct = str(int(10000 * last_candle['bb_mid_pct_1d'])) + " " + str( # int(last_candle['rsi_pct_1d'])) + " " + str(int(last_candle['rsi_pct_1h'])) # first_rate = self.percent_threshold.value # last_rate = self.threshold.value # action = self.color_line(action, action) sma5_1d = '' sma5_1h = '' sma5 = str(sma5_1d) + ' ' + str(sma5_1h) last_lost = round((last_candle['haclose'] - self.pairs[pair]['max_touch']) / self.pairs[pair]['max_touch'], 3) if buys is None: buys = '' max_touch = '' # round(last_candle['max12_1d'], 1) #round(self.pairs[pair]['max_touch'], 1) pct_max = self.getPctFirstBuy(pair, last_candle) total_counts = str(buys) + '/' + str(sum(pair_data['count_of_buys'] for pair_data in self.pairs.values())) dist_max = round(100 * (last_candle['max12_1d'] - last_candle['min12_1d']) / last_candle['min12_1d'], 0) # if trade_type is not None: # if np.isnan(last_candle['rsi_1d']): # string = ' ' # else: # string = (str(int(last_candle['rsi_1d']))) + " " + str(int(last_candle['rsi_deriv1_1d'])) # trade_type = trade_type \ # + " " + string \ # + " " + str(int(last_candle['rsi_1h'])) \ # + " " + str(int(last_candle['rsi_deriv1_1h'])) # val144 = self.getProbaHausse144(last_candle) # val1h = self.getProbaHausse1h(last_candle) val = self.getProbaHausse(last_candle) self.printLog( f"| {date:<16} | {action:<10} | {pair[0:3]:<3} | {trade_type or '-':<18} |{rate or '-':>9}| {dispo or '-':>6} " f"| {profit or '-':>8} | {pct_max or '-':>6} | {round(self.pairs[pair]['max_touch'], 2) or '-':>11} | {last_lost or '-':>12} " f"| {int(self.pairs[pair]['last_max']) or '-':>7} |{total_counts or '-':>4}|{stake or '-':>7}" f"|{last_candle['tendency_12'] or '-':>3}|" # {last_candle['tendency_1h'] or '-':>3}|{last_candle['tendency_1d'] or '-':>3}" # f"|{round(last_candle['mid_smooth_24_deriv1'],3) or '-':>6}|{round(last_candle['mid_smooth_1h_deriv1'],3) or '-':>6}|{round(last_candle['mid_smooth_deriv1_1d'],3) or '-' :>6}|" # f"{round(last_candle['mid_smooth_24_deriv2'],3) or '-' :>6}|{round(last_candle['mid_smooth_1h_deriv2'],3) or '-':>6}|{round(last_candle['mid_smooth_deriv2_1d'],3) or '-':>6}|" f"{round(val, 1) or '-' :>6}|" f"{round(last_candle['mid_smooth_12'], 4) or '-' :>7}|{round(last_candle['mid_smooth_12_deriv1'], 4) or '-' :>7}|{dist_max:>7}" ) def printLineLog(self): # f"sum1h|sum1d|Tdc|Tdh|Tdd| drv1 |drv_1h|drv_1d|" self.printLog( f"+{'-' * 18}+{'-' * 12}+{'-' * 5}+{'-' * 20}+{'-' * 9}+{'-' * 8}+{'-' * 10}+{'-' * 8}+{'-' * 13}+{'-' * 14}+{'-' * 9}+{'-' * 4}+{'-' * 7}+" f"{'-' * 3}" # "+{'-' * 3}+{'-' * 3} f"+{'-' * 6}+{'-' * 7}+{'-' * 7}+{'-' * 7}+" ) def printLog(self, str): if not self.dp.runmode.value in ('backtest', 'hyperopt', 'lookahead-analysis'): logger.info(str) else: if not self.dp.runmode.value in ('hyperopt'): print(str) def add_tendency_column(self, dataframe: pd.DataFrame, suffixe='') -> pd.DataFrame: def tag_by_derivatives(row): d1 = row[f"mid_smooth{suffixe}_deriv1"] d2 = row[f"mid_smooth{suffixe}_deriv2"] d1_lim_inf = -0.01 d1_lim_sup = 0.01 if d1 >= d1_lim_inf and d1 <= d1_lim_sup: # and d2 >= d2_lim_inf and d2 <= d2_lim_sup: return 'P' # Palier if d1 == 0.0: return 'DH' if d2 > 0 else 'DB' # Depart Hausse / Départ Baisse if d1 > d1_lim_sup: return 'H++' if d2 > 0 else 'H+' # Acceleration Hausse / Ralentissement Hausse if d1 < d1_lim_inf: return 'B--' if d2 < 0 else 'B-' # Accéleration Baisse / Ralentissement Baisse return 'Mid' dataframe[f"tendency{suffixe}"] = dataframe.apply(tag_by_derivatives, axis=1) return dataframe def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: # Add all ta features pair = metadata['pair'] heikinashi = qtpylib.heikinashi(dataframe) dataframe['haopen'] = heikinashi['open'] dataframe['haclose'] = heikinashi['close'] dataframe['hapercent'] = (dataframe['haclose'] - dataframe['haopen']) / dataframe['haclose'] dataframe['hapercent3'] = (dataframe['haclose'] - dataframe['haopen'].shift(3)) / dataframe['haclose'].shift(3) dataframe['sma5'] = talib.SMA(dataframe, timeperiod=5) dataframe['sma10'] = talib.SMA(dataframe, timeperiod=10) self.calculeDerivees(dataframe, 'sma10') dataframe['sma20'] = talib.SMA(dataframe, timeperiod=20) self.calculeDerivees(dataframe, 'sma20') dataframe['sma144'] = talib.SMA(dataframe, timeperiod=144) self.calculeDerivees(dataframe, 'sma144') dataframe["percent"] = (dataframe["close"] - dataframe["open"]) / dataframe["open"] dataframe["percent3"] = (dataframe["close"] - dataframe["open"].shift(3)) / dataframe["open"].shift(3) dataframe["percent5"] = (dataframe["close"] - dataframe["open"].shift(5)) / dataframe["open"].shift(5) dataframe["percent12"] = (dataframe["close"] - dataframe["open"].shift(12)) / dataframe["open"].shift(12) dataframe = self.calculateDerivation(dataframe, window=3, suffixe="_3") dataframe["mid_re_smooth_3"] = self.conditional_smoothing(dataframe['mid_smooth_3'].dropna(), threshold=0.0005).dropna() self.calculeDerivees(dataframe, "mid_re_smooth_3") dataframe = self.calculateDerivation(dataframe, window=12, suffixe="_12") dataframe = self.calculateDerivation(dataframe, window=24, suffixe="_24", factor_1=1000, factor_2=10) # print(metadata['pair']) dataframe['rsi'] = talib.RSI(dataframe['close'], timeperiod=14) self.calculeDerivees(dataframe, 'rsi') # Bollinger Bands bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2) dataframe['bb_lowerband'] = bollinger['lower'] dataframe['bb_middleband'] = bollinger['mid'] dataframe['bb_upperband'] = bollinger['upper'] dataframe["bb_percent"] = ( (dataframe["close"] - dataframe["bb_lowerband"]) / (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) ) dataframe["bb_width"] = ((dataframe["bb_upperband"] - dataframe["bb_lowerband"]) / dataframe["bb_upperband"]) # Normalization # dataframe = self.calculateRegression(dataframe, column='mid_smooth', window=24, degree=4, future_offset=12) # dataframe = self.calculateRegression(dataframe, column='mid_smooth_24', window=24, degree=4, future_offset=12) ################### INFORMATIVE 1h informative = self.dp.get_pair_dataframe(pair=metadata['pair'], timeframe="1h") heikinashi = qtpylib.heikinashi(informative) informative['haopen'] = heikinashi['open'] informative['haclose'] = heikinashi['close'] informative['hapercent'] = (informative['haclose'] - informative['haopen']) / informative['haclose'] # informative = self.calculateDerivation(informative, window=12) # informative = self.apply_regression_derivatives(informative, column='mid', window=5, degree=4) informative['volatility'] = talib.STDDEV(informative['close'], timeperiod=14) / informative['close'] self.calculeDerivees(informative, 'volatility') informative['atr'] = (talib.ATR(informative['high'], informative['low'], informative['close'], timeperiod=14)) / \ informative['close'] self.calculeDerivees(informative, 'atr') informative['rsi'] = talib.RSI(informative['close']) # , timeperiod=7) informative['sma5'] = talib.SMA(informative, timeperiod=5) informative['sma24'] = talib.SMA(informative, timeperiod=24) self.calculeDerivees(informative, 'sma5') self.calculeDerivees(informative, 'sma24') # informative["mid_re_smooth"] = self.conditional_smoothing(informative['mid_smooth'].dropna(), threshold=0.0005).dropna() # self.calculeDerivees(informative, "mid_re_smooth") # self.calculateDownAndUp(informative, limit=0.0012) # informative['futur_percent_3'] = 100 * ((informative['sma5'].shift(-3) - informative['sma5']) / informative['sma5']) # if self.dp.runmode.value in ('backtest'): # print("##################") # print("# STAT HOUR") # print("##################") # self.calculateStats(informative, 'sma5_deriv1', 'futur_percent_3') dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "1h", ffill=True) ################### INFORMATIVE 1d informative = self.dp.get_pair_dataframe(pair=metadata['pair'], timeframe="1d") # informative = self.calculateDerivation(informative, window=5, factor_1=10000, factor_2=1000) # informative['volatility'] = talib.STDDEV(informative['close'], timeperiod=14) / informative['close'] # informative['atr'] = (talib.ATR(informative['high'], informative['low'], informative['close'], timeperiod=14)) / informative['close'] # informative = self.apply_regression_derivatives(informative, column='mid', window=5, degree=4) informative['max12'] = talib.MAX(informative['close'], timeperiod=12) informative['max60'] = talib.MAX(informative['close'], timeperiod=60) informative['min12'] = talib.MIN(informative['close'], timeperiod=12) informative['min60'] = talib.MIN(informative['close'], timeperiod=60) # informative['rsi'] = talib.RSI(informative['close']) #, timeperiod=7) # self.calculeDerivees(informative, 'rsi') # informative['sma5'] = talib.SMA(informative, timeperiod=5) informative['sma14'] = talib.SMA(informative, timeperiod=14) self.calculeDerivees(informative, 'sma5', factor_1=10, factor_2=1) # informative['futur_percent_3'] = 100 * ((informative['sma5'].shift(-3) - informative['sma5']) / informative['sma5']) # if self.dp.runmode.value in ('backtest'): # print("##################") # print("# STAT DAY") # print("##################") # self.calculateStats(informative, 'sma5_deriv1', 'futur_percent_3') # informative['close_smooth'] = self.conditional_smoothing(informative['mid'].dropna(), threshold=0.0015).dropna() # informative['smooth'], informative['deriv1'], informative['deriv2'] = self.smooth_and_derivatives(informative['close_smooth']) # informative['deriv1'] = 100 * informative['deriv1'] / informative['mid'] # informative['deriv2'] = 1000 * informative['deriv2'] / informative['mid'] # poly_func, x_future, y_future, count = self.polynomial_forecast(informative['sma5_deriv1_1d'], window=24, degree=4) dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "1d", ffill=True) dataframe['last_price'] = dataframe['close'] dataframe['first_price'] = dataframe['close'] # dataframe['mid_price'] = (dataframe['last_price'] + dataframe['first_price']) / 2 # dataframe['close01'] = dataframe.iloc[-1]['close'] * 1.01 # dataframe['limit'] = dataframe['close'] count_buys = 0 if self.dp: if self.dp.runmode.value in ('live', 'dry_run'): self.getOpenTrades() for trade in self.trades: if trade.pair != pair: continue filled_buys = trade.select_filled_orders('buy') count = 0 amount = 0 for buy in filled_buys: if count == 0: dataframe['first_price'] = buy.price self.pairs[pair]['first_buy'] = buy.price # dataframe['close01'] = buy.price * 1.01 # Order(id=2396, trade=1019, order_id=29870026652, side=buy, filled=0.00078, price=63921.01, # status=closed, date=2024-08-26 02:20:11) dataframe['last_price'] = buy.price self.pairs[pair]['last_buy'] = buy.price count = count + 1 amount += buy.price * buy.filled # dataframe['mid_price'] = (dataframe['last_price'] + dataframe['first_price']) / 2 count_buys = count # dataframe['limit'] = dataframe['last_price'] * (1 - self.baisse[count] / 100) # dataframe['amount'] = amount # dataframe['mid_smooth_tag'] = qtpylib.crossed_below(dataframe['mid_smooth_24_deriv1'], dataframe['mid_smooth_deriv2_24']) # =============================== # lissage des valeurs horaires dataframe['mid_smooth_1h'] = dataframe['mid'].rolling(window=6).mean() dataframe["mid_smooth_1h_deriv1"] = 100 * dataframe["mid_smooth_1h"].diff() / dataframe['mid_smooth_1h'] dataframe["mid_smooth_1h_deriv2"] = 10 * dataframe["mid_smooth_1h_deriv1"].diff() # Compter les baisses / hausses consécutives self.calculateDownAndUp(dataframe, limit=0.0001) # dataframe["mid_re_smooth_1h"] = self.conditional_smoothing(dataframe['mid_smooth_1h'].dropna(), threshold=0.0005).dropna() # self.calculeDerivees(dataframe, "mid_re_smooth_1h") # dataframe['close_smooth_1h'] = self.conditional_smoothing(dataframe['mid'].rolling(window=3).mean().dropna(), threshold=0.0005) # dataframe['smooth_1h'], dataframe['deriv1_1h'], dataframe['deriv2_1h'] = self.smooth_and_derivatives(dataframe['close_smooth_1h']) # dataframe['deriv1_1h'] = 100 * dataframe['deriv1_1h'] / dataframe['mid_smooth_1h'] # dataframe['deriv2_1h'] = 1000 * dataframe['deriv2_1h'] / dataframe['mid_smooth_1h'] horizon_h = 12 dataframe['sma5_1h'] = dataframe['sma5_1h'].rolling(window=horizon_h).mean() # dataframe['ema_volume'] = dataframe['ema_volume'].rolling(window=horizon_h).mean() # dataframe['sma24_1h'] = dataframe['sma24_1h'].rolling(window=horizon_h).mean() # dataframe['sma24_deriv1_1h'] = dataframe['sma24_deriv1_1h'].rolling(window=horizon_h).mean() # dataframe = self.calculateRegression(dataframe, column='mid_smooth_1h', window=horizon_h * 12, degree=4, future_offset=24) # Suppose que df['close'] est ton prix de clôture # dataframe['close_smooth_24'] = self.conditional_smoothing(dataframe['mid'].rolling(24).mean().dropna(), threshold=0.0015) # dataframe['smooth_24'], dataframe['smooth_24_deriv1'], dataframe['smooth_24_deriv2'] = self.smooth_and_derivatives(dataframe['close_smooth_24']) # dataframe['smooth_24_deriv1'] = 100 * dataframe['smooth_24_deriv1'] / dataframe['mid_smooth_24'] # dataframe['smooth_24_deriv2'] = 100 * dataframe['smooth_24_deriv2'] / dataframe['mid_smooth_24'] dataframe['close_smooth'] = self.conditional_smoothing(dataframe['mid'].rolling(3).mean().dropna(), threshold=0.001) dataframe['smooth'], dataframe['deriv1'], dataframe['deriv2'] = self.smooth_and_derivatives( dataframe['close_smooth']) dataframe['deriv1'] = 100 * dataframe['deriv1'] / dataframe['mid'] dataframe['deriv2'] = 100 * dataframe['deriv2'] / dataframe['mid'] # =============================== # Lissage des valeurs Journalières horizon_d = 12 * 5 * 24 # dataframe['mid_smooth_1d'] = dataframe['mid_smooth_1d'].rolling(window=horizon_d * 5).mean() # dataframe["mid_smooth_deriv1_1d"] = dataframe["mid_smooth_1d"].rolling(horizon_d).mean().diff() / horizon_d # dataframe["mid_smooth_deriv2_1d"] = horizon_d * dataframe["mid_smooth_deriv1_1d"].rolling(horizon_d).mean().diff() # # dataframe['sma5_1d'] = dataframe['sma5_1d'].rolling(window=horizon_d).mean() # dataframe['sma5_deriv1_1d'] = dataframe['sma5_deriv1_1d'].rolling(window=horizon_d).mean() # dataframe['sma24_1d'] = dataframe['sma24_1d'].rolling(window=horizon_d).mean() # dataframe['sma24_deriv1_1d'] = dataframe['sma24_deriv1_1d'].rolling(window=horizon_d).mean() # dataframe = self.calculateRegression(dataframe, column='mid_smooth_1d', window=24, degree=4, future_offset=12) # dataframe['percent_with_previous_day'] = 100 * (dataframe['close'] - dataframe['close_1d']) / dataframe['close'] # dataframe['percent_with_max_hour'] = 100 * (dataframe['close'] - dataframe['max12_1h']) / dataframe['close'] # # horizon_h = 24 * 5 # dataframe['futur_percent_1h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-12) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # dataframe['futur_percent_3h'] = 100 * ((dataframe['close'].shift(-36) - dataframe['close']) / dataframe['close']).rolling(horizon_h).mean() # dataframe['futur_percent_5h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-60) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # dataframe['futur_percent_12h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-144) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # # dataframe['futur_percent_1d'] = 100 * (dataframe['close'].shift(-1) - dataframe['close']) / dataframe['close'] # dataframe['futur_percent_3d'] = 100 * (dataframe['close'].shift(-3) - dataframe['close']) / dataframe['close'] # # self.calculateProbabilite2Index(dataframe, ['futur_percent_1d'], 'sma24_deriv1_1h', 'sma5_1d') dataframe['ema_volume'] = 20 * (dataframe['volume'] * dataframe['hapercent']) / ( abs(dataframe['volume'].shift(1)) + abs(dataframe['volume'].shift(2))) self.calculeDerivees(dataframe, 'ema_volume', factor_1=10, factor_2=1) # if self.dp.runmode.value in ('backtest'): # print("##################") # print("# STAT DAY vs HOUR") # print("##################") # self.calculateProbabilite2Index(dataframe, futur_cols=['futur_percent_3h'], indic_1='ema_volume', # indic_2='mid_smooth_1h_deriv1') dataframe['proba_hausse'] = dataframe.apply(lambda row: self.getProbaHausse(row), axis=1) return dataframe def calculeDerivees(self, dataframe, indic, factor_1=100, factor_2=10): dataframe[f"{indic}_deriv1"] = factor_1 * dataframe[f"{indic}"].diff() / dataframe[f"{indic}"] dataframe[f"{indic}_deriv2"] = factor_2 * dataframe[f"{indic}_deriv1"].diff() def calculateDownAndUp(self, dataframe, limit=0.0001): dataframe['down'] = dataframe['mid_smooth_1h_deriv1'] < limit # dataframe['hapercent'] <= limit dataframe['up'] = dataframe['mid_smooth_1h_deriv1'] > limit # dataframe['hapercent'] >= limit dataframe['down_count'] = - dataframe['down'].astype(int) * ( dataframe['down'].groupby((dataframe['down'] != dataframe['down'].shift()).cumsum()).cumcount() + 1) dataframe['up_count'] = dataframe['up'].astype(int) * ( dataframe['up'].groupby((dataframe['up'] != dataframe['up'].shift()).cumsum()).cumcount() + 1) # Créer une colonne vide dataframe['down_pct'] = self.calculateUpDownPct(dataframe, 'down_count') dataframe['up_pct'] = self.calculateUpDownPct(dataframe, 'up_count') def calculateDerivation(self, dataframe, window=12, suffixe='', factor_1=100, factor_2=10): dataframe['mid'] = dataframe['haopen'] + (dataframe['haclose'] - dataframe['haopen']) / 2 # 1. Calcul du lissage par moyenne mobile médiane dataframe[f"mid_smooth{suffixe}"] = dataframe['haclose'].rolling(window=window).mean() # 2. Dérivée première = différence entre deux bougies successives dataframe[f"mid_smooth{suffixe}_deriv1"] = round( factor_1 * dataframe[f"mid_smooth{suffixe}"].rolling(window=3).mean().diff() / dataframe[ f"mid_smooth{suffixe}"], 4) # 3. Dérivée seconde = différence de la dérivée première dataframe[f"mid_smooth{suffixe}_deriv2"] = round( factor_2 * dataframe[f"mid_smooth{suffixe}_deriv1"].rolling(window=3).mean().diff(), 4) dataframe = self.add_tendency_column(dataframe, suffixe) return dataframe def getOpenTrades(self): # if len(self.trades) == 0: self.trades = Trade.get_open_trades() return self.trades def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame: pair = metadata['pair'] # self.getOpenTrades() expected_profit = self.expectedProfit(pair, dataframe.iloc[-1]) # self.getBinanceOrderBook(pair, dataframe) last_candle = dataframe.iloc[-1].squeeze() # dataframe.loc[ # ( # (dataframe['percent'] > 0) # & (dataframe['mid_smooth_deriv1'] >= dataframe['mid_smooth_deriv1'].shift(1)) # ), ['enter_long', 'enter_tag']] = (1, 'down') dataframe.loc[ ( # (dataframe['mid_smooth_1h_deriv1'].shift(2) >= dataframe['mid_smooth_1h_deriv1'].shift(1)) # & (dataframe['mid_smooth_1h_deriv1'].shift(1) <= dataframe['mid_smooth_1h_deriv1']) (dataframe['mid_smooth_1h_deriv1'].shift(2) >= dataframe['mid_smooth_1h_deriv1'].shift(1)) & (dataframe['mid_smooth_1h_deriv1'].shift(1) <= dataframe['mid_smooth_1h_deriv1']) & (dataframe['close'] < dataframe['bb_middleband']) ), ['enter_long', 'enter_tag']] = (1, 'smth') dataframe['can_sell'] = np.where(((dataframe['mid_smooth_1h_deriv1'].shift(2) <= dataframe['mid_smooth_1h_deriv1'].shift(1))\ & (dataframe['mid_smooth_1h_deriv1'].shift(1) >= dataframe['mid_smooth_1h_deriv1'])), dataframe['close'], np.nan) dataframe['can_buy'] = np.where(((dataframe['mid_smooth_1h_deriv1'].shift(2) >= dataframe['mid_smooth_1h_deriv1'].shift(1))\ & (dataframe['mid_smooth_1h_deriv1'].shift(1) <= dataframe['mid_smooth_1h_deriv1'])), dataframe['close'], np.nan) dataframe['test'] = np.where(dataframe['enter_long'] == 1, dataframe['close'] * 1.01, np.nan) dataframe['perte_02'] = np.where((dataframe['hapercent3'] * 100 < -0.2), dataframe['close'], np.nan) # self.paliers = self.get_dca_stakes() # if self.dp.runmode.value in ('backtest'): # today = datetime.now().strftime("%Y-%m-%d-%H:%M:%S") # dataframe.to_feather(f"user_data/data/binance/{today}-{metadata['pair'].replace('/', '_')}_df.feather") # dataframe.to_csv(f"user_data/data/binance/{today}-{metadata['pair'].replace('/', '_')}_df.csv") # # df = dataframe # # # Colonnes à traiter # # futur_cols = ['futur_percent_1h', 'futur_percent_3h', 'futur_percent_5h', 'futur_percent_12h'] # futur_cols = ['futur_percent_1h'] # # # Tranches équitables par quantiles # # indic_1 = 'mid_smooth_24_deriv1' # indic_2 = 'sma144_deriv1' # #indic_2 = 'percent_with_max_hour' # # indic_1 = 'mid_smooth_1h_deriv1' # # indic_2 = 'sma5_deriv1_1d' # # self.calculateProbabilite2Index(df, futur_cols, indic_1, indic_2) # if (self.getShortName(pair) == 'BTC'): # for pct in range(0, 75): # factor = self.multi_step_interpolate(pct, self.thresholds, self.factors) # print(f"{pct} => {factor}") return dataframe def calculateProbabilite2Index(self, df, futur_cols, indic_1, indic_2): # # Définition des tranches pour les dérivées # bins_deriv = [-np.inf, -0.05, -0.01, 0.01, 0.05, np.inf] # labels = ['forte baisse', 'légère baisse', 'neutre', 'légère hausse', 'forte hausse'] # # # Ajout des colonnes bin (catégorisation) # df[f"{indic_1}_bin"] = pd.cut(df['mid_smooth_1h_deriv1'], bins=bins_deriv, labels=labels) # df[f"{indic_2}_bin"] = pd.cut(df['mid_smooth_deriv1_1d'], bins=bins_deriv, labels=labels) # # # Colonnes de prix futur à analyser # futur_cols = ['futur_percent_1h', 'futur_percent_2h', 'futur_percent_3h', 'futur_percent_4h', 'futur_percent_5h'] # # # Calcul des moyennes et des effectifs # grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"])[futur_cols].agg(['mean', 'count']) # # pd.set_option('display.width', 200) # largeur max affichage # pd.set_option('display.max_columns', None) pd.set_option('display.max_columns', None) pd.set_option('display.width', 300) # largeur max affichage # nettoyage # series = df[f"{indic_2}"].dropna() # unique_vals = df[f"{indic_2}"].nunique() # print(unique_vals) # print(df[f"{indic_2}"]) n = len(self.labels) df[f"{indic_1}_bin"], bins_1h = pd.qcut(df[f"{indic_1}"], q=n, labels=self.labels, retbins=True, duplicates='drop') df[f"{indic_2}_bin"], bins_1d = pd.qcut(df[f"{indic_2}"], q=n, labels=self.labels, retbins=True, duplicates='drop') # Affichage formaté pour code Python print(f"Bornes des quantiles pour {indic_1} : [{', '.join([f'{b:.4f}' for b in bins_1h])}]") print(f"Bornes des quantiles pour {indic_2} : [{', '.join([f'{b:.4f}' for b in bins_1d])}]") # Agrégation grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[futur_cols].agg(['mean', 'count']) # Affichage with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(grouped.round(4)) # Ajout des probabilités de hausse for col in futur_cols: df[f"{col}_is_up"] = df[col] > 0 # Calcul de la proba de hausse proba_up = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[f"{col}_is_up"].mean().unstack() print(f"\nProbabilité de hausse pour {col} (en %):") with pd.option_context('display.max_rows', None, 'display.max_columns', None): print((proba_up * 100).round(1)) # Affichage formaté des valeurs comme tableau Python with pd.option_context('display.max_rows', None, 'display.max_columns', None): df_formatted = (proba_up * 100).round(1) print("data = {") for index, row in df_formatted.iterrows(): row_values = ", ".join([f"{val:.1f}" for val in row]) print(f"'{index}': [{row_values}], ") print("}") def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame: # dataframe.loc[ # ( # (dataframe['mid_smooth_deriv1'] == 0) # & (dataframe['mid_smooth_deriv1'].shift(1) > 0) # ), ['sell', 'exit_long']] = (1, 'sell_sma5_pct_1h') return dataframe def adjust_trade_position(self, trade: Trade, current_time: datetime, current_rate: float, current_profit: float, min_stake: float, max_stake: float, **kwargs): # ne rien faire si ordre deja en cours if trade.has_open_orders: print("skip open orders") return None if (self.wallets.get_available_stake_amount() < 50): # or trade.stake_amount >= max_stake: return 0 dataframe, _ = self.dp.get_analyzed_dataframe(trade.pair, self.timeframe) last_candle = dataframe.iloc[-1].squeeze() before_last_candle_12 = dataframe.iloc[-13].squeeze() before_last_candle_24 = dataframe.iloc[-25].squeeze() last_candle_3 = dataframe.iloc[-4].squeeze() last_candle_previous_1h = dataframe.iloc[-13].squeeze() # prépare les données current_time = current_time.astimezone(timezone.utc) open_date = trade.open_date.astimezone(timezone.utc) dispo = round(self.wallets.get_available_stake_amount()) hours_since_first_buy = (current_time - trade.open_date_utc).seconds / 3600.0 days_since_first_buy = (current_time - trade.open_date_utc).days hours = (current_time - trade.date_last_filled_utc).total_seconds() / 3600.0 count_of_buys = trade.nr_of_successful_entries current_time_utc = current_time.astimezone(timezone.utc) open_date = trade.open_date.astimezone(timezone.utc) days_since_open = (current_time_utc - open_date).days pair = trade.pair pct_first = 0 total_counts = sum(pair_data['count_of_buys'] for pair_data in self.pairs.values() if not pair in ('BTC/USDT', 'BTC/USDC')) if self.pairs[pair]['first_buy']: pct_first = self.getPctFirstBuy(pair, last_candle) pct = 0.012 if count_of_buys == 1: pct_max = current_profit else: if self.pairs[trade.pair]['last_buy']: pct_max = self.getPctLastBuy(pair, last_candle) else: pct_max = - pct if pair in ('BTC/USDT', 'BTC/USDC') or count_of_buys <= 2: lim = - pct - (count_of_buys * 0.001) # lim = - (0.012 * (1 + round(count_of_buys / 5)) + 0.001 * (count_of_buys - 1)) # lim = - (0.012 + 0.001 * (count_of_buys - 1) + (0.002 * count_of_buys if count_of_buys > 10 else 0.001 * count_of_buys if count_of_buys > 5 else 0)) else: pct = 0.05 lim = - pct - (count_of_buys * 0.001) if (len(dataframe) < 1): print("skip dataframe") return None if not self.should_enter_trade(pair, last_candle): return None # if self.dp.runmode.value in ('dry_run'): # if pair not in ('BTC/USDT', 'BTC/USDC', 'XRP/USDT', 'XRP/USDC', 'ETH/USDT', 'ETH/USDC', 'SOL/USDT', 'SOL/USDT'): # # print(f"skip pair {pair}") # return None # else: # if pair not in ('BTC/USDT', 'BTC/USDC'): # btc_count = self.pairs['BTC/USDT']['count_of_buys'] + self.pairs['BTC/USDC']['count_of_buys'] # # print(f"skip pair {pair}") # if (btc_count > 4 or count_of_buys + 1 > btc_count) and pct_max < 0.20: # return None # # déclenche un achat si bougie rouge importante # stake_amount = self.config.get('stake_amount') # stake_amount = min(stake_amount, self.wallets.get_available_stake_amount()) # current_time = current_time.astimezone(timezone.utc) # seconds_since_filled = (current_time - trade.date_last_filled_utc).total_seconds() # pct = (last_candle['close'] - last_candle['open']) / (last_candle['open']) * 100 # if ( # stake_amount # and pct <= - 1.10 #self.red_candle_pct # and min_stake < stake_amount < max_stake # and seconds_since_filled > (60 * 5) # # and (last_candle["sma24_deriv1_1h"] > - 0.02) # # and seconds_since_filled > (1 * 3600) # # and count_of_entries < 10 # ): # trade_type = last_candle['enter_tag'] if last_candle['enter_long'] == 1 else 'pct48' # self.log_trade( # last_candle=last_candle, # date=current_time, # action="Adjust 1", # dispo=dispo, # pair=trade.pair, # rate=current_rate, # trade_type=trade_type, # profit=round(current_profit, 4), # round(current_profit * trade.stake_amount, 2), # buys=trade.nr_of_successful_entries + 1, # stake=round(stake_amount, 2) # ) # # self.pairs[trade.pair]['last_buy'] = current_rate # self.pairs[trade.pair]['max_touch'] = last_candle['close'] # self.pairs[trade.pair]['last_candle'] = last_candle # return stake_amount # # # déclenche un achat en conditions d'achat standard # if ( # stake_amount # and last_candle['close'] < last_candle['sma20'] # and last_candle['close'] < last_candle['open'] # and min_stake < stake_amount < max_stake # and (last_candle["sma24_deriv1_1h"] > - 0.02) # and seconds_since_filled > 23 * 3600 #self.staking_delay * 3600 # ): # stake_amount = stake_amount * seconds_since_filled / (23 * 3600) # trade_type = last_candle['enter_tag'] if last_candle['enter_long'] == 1 else 'pct48' # self.log_trade( # last_candle=last_candle, # date=current_time, # action="Adjust 2", # dispo=dispo, # pair=trade.pair, # rate=current_rate, # trade_type=trade_type, # profit=round(current_profit, 4), # round(current_profit * trade.stake_amount, 2), # buys=trade.nr_of_successful_entries + 1, # stake=round(stake_amount, 2) # ) # # self.pairs[trade.pair]['last_buy'] = current_rate # self.pairs[trade.pair]['max_touch'] = last_candle['close'] # self.pairs[trade.pair]['last_candle'] = last_candle # return stake_amount # # return None # index = self.get_palier_index(pct_first) # if index is None: # return None # index = index -1 # lim, stake_amount = self.paliers[index] #- pct - (count_of_buys * 0.001) # self.get_active_stake() # val144 = self.getProbaHausse144(last_candle) # val1h = self.getProbaHausse1h(last_candle) # val = self.getProbaHausse(last_candle) # buy = False # previous = 0 # # current_profit=-0.001998 count_of_buys=1 pct_first=0.000 pct_palier=-0.629 pct_max=-0.002 lim=0.000 # # for pct_palier, stake_amount in self.paliers: # if abs(pct_palier) > abs(pct_first): # lim = pct_palier # break # previous = pct_palier # print(f"{trade.pair} current_profit={current_profit} count_of_buys={count_of_buys} pct_first={pct_first:.3f} pct_palier={pct_palier:.3f} pct_max={pct_max:.3f} lim={lim:.3f} ") # if (days_since_open > count_of_buys) & (0 < count_of_buys <= max_buys) & (current_rate <= limit) & (last_candle['enter_long'] == 1): # ['mid_smooth_1h_deriv1'] # sans cdt° ==>Avg. stake amount 276.516 USDT │ Total trade volume 175760.342 USDT 315 │ 1.17 │ 1204.156 │ 60.21│ 1 day, 13:45:00 │ 314 0 1 99.7 │ 0.787 USDT 0.02% │ # > - 0.03 ==>Avg. stake amount 259.702 USDT │ Total trade volume 149302.88 USDT 285 │ 1.19 │ 974.542 │ 48.73│ 1 day, 17:45:00 │ 284 0 1 99.6 │ 0.787 USDT 0.03% │ # > - 0.03 ==>Avg. stake amount 253.535 USDT │ Total trade volume 145312.936 USDT 284 │ 1.19 │ 1014.898 │ 50.74| 1 day, 17:54:00 │ 283 0 1 99.6 │ 0.684 USDT 0.02% │ # > - 0.015 ==>Avg. stake amount 249.107 USDT │ Total trade volume 138186.861 USDT 275 │ 1.20 │ 901.976 │ 45.1 │ 1 day, 19:17:00 │ 274 0 1 99.6 │ 0.684 USDT 0.02% condition = (last_candle['sma5_deriv1_1h'] > 0 or count_of_buys <= 5) # last_candle['mid_smooth_1h_deriv1'] > - 0.05 #(last_candle['mid_smooth_3_deriv1'] > self.buy_mid_smooth_3_deriv1.value) and (last_candle['mid_smooth_24_deriv1'] > self.buy_mid_smooth_24_deriv1.value) # (last_candle['enter_long'] == 1 & (count_of_buys < 3)) \ # or ((before_last_candle['mid_re_smooth_3_deriv1'] <= 0) & (last_candle['mid_re_smooth_3_deriv1'] >= 0) & (3 <= count_of_buys < 6)) \ # or ((before_last_candle['mid_smooth_1h_deriv1'] <= 0) & (last_candle['mid_smooth_1h_deriv1'] >= 0) & (6 <= count_of_buys)) limit_buy = 40 if (count_of_buys < limit_buy) and condition and ( pct_max < lim): # and val > self.buy_val_adjust.value and last_candle['mid_smooth_deriv1_1d'] > - 1): try: # if 6 <= count_of_buys: # if not ((before_last_candle_24['sma24_deriv1_1h'] > before_last_candle_12['sma24_deriv1_1h']) # & (before_last_candle_12['sma24_deriv1_1h'] < last_candle['sma24_deriv1_1h'])): # return None # print(f"{trade.pair} current_profit={current_profit} count_of_buys={count_of_buys} pct_first={pct_first:.3f} pct_max={pct_max:.3f} lim={lim:.3f} index={index}") # self.pairs[trade.pair]['last_palier_index'] = index # # Appel de la fonction # poly_func, x_future, y_future, count = self.polynomial_forecast( # dataframe['mid_smooth_12'], # window=self.buy_horizon_predict_1h.value * 12, # degree=4) # # if count < 3: # return None max_amount = self.config.get('stake_amount') * 2.5 # stake_amount = min(stake_amount, self.wallets.get_available_stake_amount()) stake_amount = min(min(max_amount, self.wallets.get_available_stake_amount()), self.adjust_stake_amount(pair, last_candle) - 10 * pct_first / pct) # min(200, self.adjust_stake_amount(pair, last_candle) * self.fibo[count_of_buys]) trade_type = last_candle['enter_tag'] if last_candle['enter_long'] == 1 else 'pct48' self.pairs[trade.pair]['count_of_buys'] += 1 self.pairs[pair]['total_amount'] += stake_amount self.log_trade( last_candle=last_candle, date=current_time, action="Loss -", dispo=dispo, pair=trade.pair, rate=current_rate, trade_type=trade_type, profit=round(current_profit, 4), # round(current_profit * trade.stake_amount, 2), buys=trade.nr_of_successful_entries + 1, stake=round(stake_amount, 2) ) self.pairs[trade.pair]['last_buy'] = current_rate self.pairs[trade.pair]['max_touch'] = last_candle['close'] self.pairs[trade.pair]['last_candle'] = last_candle # df = pd.DataFrame.from_dict(self.pairs, orient='index') # colonnes_a_exclure = ['last_candle', 'last_trade', 'last_palier_index', 'stop', # 'trade_info', 'last_date', 'expected_profit', 'last_count_of_buys', 'base_stake_amount', 'stop_buy'] # df_filtered = df[df['count_of_buys'] > 0].drop(columns=colonnes_a_exclure) # # df_filtered = df_filtered["first_buy", "last_max", "max_touch", "last_sell","last_buy", 'count_of_buys', 'current_profit'] # # print(df_filtered) return stake_amount except Exception as exception: print(exception) return None # if (count_of_buys >= 6): # self.log_trade( # last_candle=last_candle, # date=current_time, # action="Sell", # dispo=dispo, # pair=trade.pair, # rate=current_rate, # trade_type="Stop loss", # profit=round(current_profit, 4), # round(current_profit * trade.stake_amount, 2), # buys=trade.nr_of_successful_entries + 1, # stake=-trade.stake_amount # ) # self.pairs[trade.pair]['last_buy'] = current_rate # self.pairs[trade.pair]['max_touch'] = last_candle['close'] # self.pairs[trade.pair]['last_candle'] = last_candle # return -trade.stake_amount # if (count_of_buys < limit_buy and pct_max > pct and current_profit > 0.004) \ # and (last_candle['rsi_deriv1_1h'] >= -5) \ # and (last_candle['tendency'] in ('P', 'H++', 'DH', 'H+')) \ # and (last_candle['mid_smooth_deriv1'] > 0.015): # try: # trade_type = last_candle['enter_tag'] if last_candle['enter_long'] == 1 else 'pct48' # self.log_trade( # last_candle=last_candle, # date=current_time, # action="Gain +", # dispo=dispo, # pair=trade.pair, # rate=current_rate, # trade_type=trade_type, # profit=round(current_profit, 4), # round(current_profit * trade.stake_amount, 2), # buys=trade.nr_of_successful_entries + 1, # stake=round(stake_amount, 2) # ) # self.pairs[trade.pair]['last_buy'] = current_rate # self.pairs[trade.pair]['max_touch'] = last_candle['close'] # self.pairs[trade.pair]['last_candle'] = last_candle # return stake_amount # except Exception as exception: # print(exception) # return None return None def getPctFirstBuy(self, pair, last_candle): return round((last_candle['close'] - self.pairs[pair]['first_buy']) / self.pairs[pair]['first_buy'], 3) def getPctLastBuy(self, pair, last_candle): return round((last_candle['close'] - self.pairs[pair]['last_buy']) / self.pairs[pair]['last_buy'], 4) def getProbaHausse(self, last_candle): value_1 = self.getValuesFromTable(self.ema_volume, last_candle['ema_volume']) value_2 = self.getValuesFromTable(self.mid_smooth_1h_deriv1, last_candle['mid_smooth_1h_deriv1']) val = self.approx_val_from_bins( matrice=self.ema_volume_mid_smooth_1h_deriv1_matrice_df, numeric_matrice=self.ema_volume_mid_smooth_1h_deriv1_numeric_matrice, row_label=value_2, col_label=value_1 ) return val def adjust_stake_amount(self, pair: str, last_candle: DataFrame): # Calculer le minimum des 14 derniers jours base_stake_amount = self.config.get('stake_amount') # Montant de base configuré if not pair in ('BTC/USDT', 'BTC/USDC'): # factors = [1, 1.2, 1.3, 1.4] adjusted_stake_amount = base_stake_amount else : first_price = self.pairs[pair]['first_buy'] if (first_price == 0): first_price = last_candle['close'] last_max = last_candle['max12_1d'] pct = 5 if last_max > 0: pct = 100 * (last_max - first_price) / last_max factor = self.multi_step_interpolate(pct, self.thresholds, self.factors) adjusted_stake_amount = base_stake_amount * factor # max(base_stake_amount, min(100, base_stake_amount * percent_4)) # pct = 100 * abs(self.getPctFirstBuy(pair, last_candle)) # # factor = self.multi_step_interpolate(pct, self.thresholds, self.factors) return adjusted_stake_amount def expectedProfit(self, pair: str, last_candle: DataFrame): count_of_buys = self.pairs[pair]['count_of_buys'] pct_first = self.getPctFirstBuy(pair, last_candle) expected_profit = max(0.004, abs( pct_first / 3)) # 0.004 + 0.002 * self.pairs[pair]['count_of_buys'] #min(0.01, first_max) # print( # f"Expected profit price={current_price:.4f} min_max={min_max:.4f} min_14={min_14_days:.4f} max_14={max_14_days:.4f} percent={percent:.4f} expected_profit={expected_profit:.4f}") return expected_profit def calculateUpDownPct(self, dataframe, key): down_pct_values = np.full(len(dataframe), np.nan) # Remplir la colonne avec les bons calculs for i in range(len(dataframe)): shift_value = abs(int(dataframe[key].iloc[i])) # Récupérer le shift actuel if i - shift_value > 1: # Vérifier que le shift ne dépasse pas l'index down_pct_values[i] = 100 * (dataframe['close'].iloc[i] - dataframe['close'].iloc[i - shift_value]) / \ dataframe['close'].iloc[i - shift_value] return down_pct_values # ✅ Première dérivée(variation ou pente) # Positive: la courbe est croissante → tendance haussière. # Négative: la courbe est décroissante → tendance baissière. # Proche de 0: la courbe est plate → marché stable ou en transition. # # Applications: # Détecter les points d’inflexion(changement de tendance) quand elle s’annule.\ # Analyser la vitesse d’un mouvement(plus elle est forte, plus le mouvement est impulsif). # # ✅ Seconde dérivée(accélération ou concavité) # Positive: la pente augmente → accélération de la hausse ou ralentissement de la baisse. # Négative: la pente diminue → accélération de la baisse ou ralentissement de la hausse. # Changement de signe: indique souvent un changement de courbure, utile pour prévoir des retournements. # # Exemples: # 🟢 Dérivée 1 > 0 et dérivée 2 > 0: tendance haussière qui s’accélère. # 🟡 Dérivée 1 > 0 et dérivée 2 < 0: tendance haussière qui ralentit → essoufflement potentiel. # 🔴 Dérivée 1 < 0 et dérivée 2 < 0: tendance baissière qui s’accélère. # 🟠 Dérivée 1 < 0 et dérivée 2 > 0: tendance baissière qui ralentit → possible bottom. # # Filtrer les signaux: ne prendre un signal haussier que si dérivée1 > 0 et dérivée2 > 0. # Détecter les zones de retournement: quand dérivée1 ≈ 0 et que dérivée2 change de signe. def calculateRegression(self, dataframe: DataFrame, column='close', window=50, degree=3, future_offset: int = 10 # projection à n bougies après ) -> DataFrame: df = dataframe.copy() regression_fit = [] regression_future_fit = [] regression_fit = [] regression_future_fit = [] for i in range(len(df)): if i < window: regression_fit.append(np.nan) regression_future_fit.append(np.nan) continue # Fin de la fenêtre d’apprentissage end_index = i start_index = i - window y = df[column].iloc[start_index:end_index].values # Si les données sont insuffisantes (juste par précaution) if len(y) < window: regression_fit.append(np.nan) regression_future_fit.append(np.nan) continue # x centré pour meilleure stabilité numérique x = np.linspace(-1, 1, window) coeffs = np.polyfit(x, y, degree) poly = np.poly1d(coeffs) # Calcul point présent (dernier de la fenêtre) x_now = x[-1] regression_fit.append(poly(x_now)) # Calcul point futur, en ajustant si on dépasse la fin remaining = len(df) - i - 1 effective_offset = min(future_offset, remaining) x_future = x_now + (effective_offset / window) * 2 # respect du même pas regression_future_fit.append(poly(x_future)) df[f"{column}_regression"] = regression_fit # 2. Dérivée première = différence entre deux bougies successives df[f"{column}_regression_deriv1"] = round(100 * df[f"{column}_regression"].diff() / df[f"{column}_regression"], 4) # 3. Dérivée seconde = différence de la dérivée première df[f"{column}_regression_deriv2"] = round( 10 * df[f"{column}_regression_deriv1"].rolling(int(window / 4)).mean().diff(), 4) df[f"{column}_future_{future_offset}"] = regression_future_fit # # 2. Dérivée première = différence entre deux bougies successives # df[f"{column}_future_{future_offset}_deriv1"] = round(100 * df[f"{column}_future_{future_offset}"].diff() / df[f"{column}_future_{future_offset}"], 4) # # # 3. Dérivée seconde = différence de la dérivée première # df[f"{column}_future_{future_offset}_deriv2"] = round(10 * df[f"{column}_future_{future_offset}_deriv1"].rolling(int(window / 4)).mean().diff(), 4) return df def getValuesFromTable(self, values, value): for i in range(len(values) - 1): if values[i] <= value < values[i + 1]: return self.labels[i] return self.labels[-1] # cas limite pour la borne max # def interpolated_val_from_bins(self, row_pos, col_pos): # """ # Renvoie une approximation interpolée (bilinéaire) d'une valeur dans la matrice # à partir de positions flottantes dans l'index (ligne) et les colonnes. # # Parameters: # matrix_df (pd.DataFrame): Matrice des probabilités (index/colonnes = labels). # row_pos (float): Position réelle de la ligne (0 = B5, 10 = H5). # col_pos (float): Position réelle de la colonne (0 = B5, 10 = H5). # # Returns: # float: Valeur interpolée, ou NaN si en dehors des bornes. # """ # # # Labels ordonnés # n = len(self.labels) # # # Vérification des limites # if not (0 <= row_pos <= n - 1) or not (0 <= col_pos <= n - 1): # return np.nan # # # Conversion des labels -> matrice # matrix = self.smooth_smadiff_matrice_df.reindex(index=self.labels, columns=self.labels).values # # # Coordonnées entières (inférieures) # i = int(np.floor(row_pos)) # j = int(np.floor(col_pos)) # # # Coefficients pour interpolation # dx = row_pos - i # dy = col_pos - j # # # Précautions sur les bords # if i >= n - 1: i = n - 2; dx = 1.0 # if j >= n - 1: j = n - 2; dy = 1.0 # # # Récupération des 4 valeurs voisines # v00 = matrix[i][j] # v10 = matrix[i + 1][j] # v01 = matrix[i][j + 1] # v11 = matrix[i + 1][j + 1] # # # Interpolation bilinéaire # interpolated = ( # (1 - dx) * (1 - dy) * v00 + # dx * (1 - dy) * v10 + # (1 - dx) * dy * v01 + # dx * dy * v11 # ) # return interpolated def approx_val_from_bins(self, matrice, numeric_matrice, row_label, col_label): """ Renvoie une approximation de la valeur à partir des labels binaires (e.g. B5, H1) en utilisant une interpolation simple basée sur les indices. Parameters: matrix_df (pd.DataFrame): Matrice avec les labels binaires en index et colonnes. row_label (str): Label de la ligne (ex: 'B3'). col_label (str): Label de la colonne (ex: 'H2'). Returns: float: Valeur approchée si possible, sinon NaN. """ # Vérification des labels if row_label not in matrice.index or col_label not in matrice.columns: return np.nan # Index correspondant row_idx = self.label_to_index.get(row_label) col_idx = self.label_to_index.get(col_label) # Approximation directe (aucune interpolation complexe ici, juste une lecture) return numeric_matrice[row_idx, col_idx] @property def protections(self): return [ { "method": "CooldownPeriod", "stop_duration_candles": 12 } # { # "method": "MaxDrawdown", # "lookback_period_candles": self.lookback.value, # "trade_limit": self.trade_limit.value, # "stop_duration_candles": self.protection_stop.value, # "max_allowed_drawdown": self.protection_max_allowed_dd.value, # "only_per_pair": False # }, # { # "method": "StoplossGuard", # "lookback_period_candles": 24, # "trade_limit": 4, # "stop_duration_candles": self.protection_stoploss_stop.value, # "only_per_pair": False # }, # { # "method": "StoplossGuard", # "lookback_period_candles": 24, # "trade_limit": 4, # "stop_duration_candles": 2, # "only_per_pair": False # }, # { # "method": "LowProfitPairs", # "lookback_period_candles": 6, # "trade_limit": 2, # "stop_duration_candles": 60, # "required_profit": 0.02 # }, # { # "method": "LowProfitPairs", # "lookback_period_candles": 24, # "trade_limit": 4, # "stop_duration_candles": 2, # "required_profit": 0.01 # } ] def conditional_smoothing(self, series, threshold=0.002): smoothed = [series.iloc[0]] for val in series.iloc[1:]: last = smoothed[-1] if abs(val - last) / last >= threshold: smoothed.append(val) else: smoothed.append(last) return pd.Series(smoothed, index=series.index) def smooth_and_derivatives(self, series, window=25, polyorder=3): series = series.copy() if series.isna().sum() > 0: series = series.ffill().bfill() # Si tu veux éviter toute NaN smooth = self.causal_savgol(series, window=window, polyorder=polyorder) deriv1 = np.diff(smooth, prepend=smooth[0]) deriv2 = np.diff(deriv1, prepend=deriv1[0]) return pd.Series(smooth, index=series.index), pd.Series(deriv1, index=series.index), pd.Series(deriv2, index=series.index) def causal_savgol(self, series, window=25, polyorder=3): result = [] half_window = window # Fenêtre complète dans le passé for i in range(len(series)): if i < half_window: result.append(np.nan) continue window_series = series[i - half_window:i] if window_series.isna().any(): result.append(np.nan) continue coeffs = np.polyfit(range(window), window_series, polyorder) poly = np.poly1d(coeffs) result.append(poly(window - 1)) return pd.Series(result, index=series.index) def get_stake_from_drawdown(self, pct: float, base_stake: float = 100.0, step: float = 0.04, growth: float = 1.15, max_stake: float = 1000.0) -> float: """ Calcule la mise à allouer en fonction du drawdown. :param pct: Drawdown en pourcentage (ex: -0.12 pour -12%) :param base_stake: Mise de base (niveau 0) :param step: Espacement entre paliers (ex: tous les -4%) :param growth: Facteur de croissance par palier (ex: 1.15 pour +15%) :param max_stake: Mise maximale à ne pas dépasser :return: Montant à miser """ if pct >= 0: return base_stake level = int(abs(pct) / step) stake = base_stake * (growth ** level) return min(stake, max_stake) def compute_adaptive_paliers(self, max_drawdown: float = 0.65, first_steps: list[float] = [0.01, 0.01, 0.015, 0.02], growth: float = 1.2) -> list[float]: """ Génère une liste de drawdowns négatifs avec des paliers plus rapprochés au début. :param max_drawdown: Drawdown max (ex: 0.65 pour -65%) :param first_steps: Liste des premiers paliers fixes en % (ex: [0.01, 0.01, 0.015]) :param growth: Facteur multiplicatif pour espacer les paliers suivants :return: Liste de drawdowns négatifs (croissants) """ paliers = [] cumulated = 0.0 # Étapes initiales rapprochées for step in first_steps: cumulated += step paliers.append(round(-cumulated, 4)) # Étapes suivantes plus espacées step = first_steps[-1] while cumulated < max_drawdown: step *= growth cumulated += step if cumulated >= max_drawdown: break paliers.append(round(-cumulated, 4)) return paliers # def get_dca_stakes(self, # max_drawdown: float = 0.65, # base_stake: float = 100.0, # first_steps: list[float] = [0.01, 0.01, 0.015, 0.015], # growth: float = 1.2, # stake_growth: float = 1.15 # ) -> list[tuple[float, float]]: # """ # Génère les paliers de drawdown et leurs stakes associés. # # :param max_drawdown: Maximum drawdown (ex: 0.65 pour -65%) # :param base_stake: Mise initiale # :param first_steps: Paliers de départ (plus resserrés) # :param growth: Multiplicateur d'espacement des paliers # :param stake_growth: Croissance multiplicative des mises # :return: Liste de tuples (palier_pct, stake) # [(-0.01, 100.0), (-0.02, 115.0), (-0.035, 132.25), (-0.05, 152.09), (-0.068, 174.9), # (-0.0896, 201.14), (-0.1155, 231.31), (-0.1466, 266.0), (-0.1839, 305.9), (-0.2287, 351.79), # (-0.2825, 404.56), (-0.347, 465.24), (-0.4244, 535.03), (-0.5173, 615.28), (-0.6287, 707.57)] # """ # paliers = [ # (-0.01, 100.0), (-0.02, 115.0), (-0.035, 130), (-0.05, 150), (-0.07, 150), # (-0.10, 150), (-0.15, 150), (-0.20, 150), (-0.25, 150), # (-0.30, 200), (-0.40, 200), # (-0.50, 300), (-0.60, 400), (-0.70, 500), (-0.80, 1000) # ] # # # cumulated = 0.0 # # stake = base_stake # # # # # Étapes initiales # # for step in first_steps: # # cumulated += step # # paliers.append((round(-cumulated, 4), round(stake, 2))) # # stake *= stake_growth # # # # # Étapes suivantes # # step = first_steps[-1] # # while cumulated < max_drawdown: # # step *= growth # # cumulated += step # # if cumulated >= max_drawdown: # # break # # paliers.append((round(-cumulated, 4), round(stake, 2))) # # stake *= stake_growth # # return paliers # def get_active_stake(self, pct: float) -> float: # """ # Renvoie la mise correspondant au drawdown `pct`. # # :param pct: drawdown courant (négatif, ex: -0.043) # :param paliers: liste de tuples (drawdown, stake) # :return: stake correspondant # """ # abs_pct = abs(pct) # stake = self.paliers[0][1] # stake par défaut # # for palier, s in self.paliers: # if abs_pct >= abs(palier): # stake = s # else: # break # # return stake # def get_palier_index(self, pct): # """ # Retourne l'index du palier franchi pour un pourcentage de baisse donné (pct). # On cherche le palier le plus profond atteint (dernier franchi). # """ # for i in reversed(range(len(self.paliers))): # seuil, _ = self.paliers[i] # #print(f"pct={pct} seuil={seuil}") # if pct <= seuil: # # print(pct) # return i # return None # Aucun palier atteint # def poly_regression_predictions(self, series: pd.Series, window: int = 20, degree: int = 2, n_future: int = 3) -> pd.DataFrame: # """ # Renvoie une DataFrame avec `n_future` colonnes contenant les extrapolations des n prochains points # selon une régression polynomiale ajustée sur les `window` dernières valeurs. # """ # result = pd.DataFrame(index=series.index) # x = np.arange(window) # # for future_step in range(1, n_future + 1): # result[f'poly_pred_t+{future_step}'] = np.nan # # for i in range(window - 1, len(series)): # y = series.iloc[i - window + 1 : i + 1].values # # if np.any(pd.isna(y)): # continue # # coeffs = np.polyfit(x, y, degree) # poly = np.poly1d(coeffs) # # for future_step in range(1, n_future + 1): # future_x = window - 1 + future_step # Extrapolation point # result.loc[series.index[i], f'poly_pred_t+{future_step}'] = poly(future_x) # # return result def polynomial_forecast(self, series: pd.Series, window: int = 20, degree: int = 2, steps=[12, 24, 36]): """ Calcule une régression polynomiale sur les `window` dernières valeurs de la série, puis prédit les `n_future` prochaines valeurs. :param series: Série pandas (ex: dataframe['close']) :param window: Nombre de valeurs récentes utilisées pour ajuster le polynôme :param degree: Degré du polynôme (ex: 2 pour quadratique) :param n_future: Nombre de valeurs futures à prédire :return: tuple (poly_function, x_vals, y_pred), où y_pred contient les prédictions futures """ if len(series) < window: raise ValueError("La série est trop courte pour la fenêtre spécifiée.") recent_y = series.iloc[-window:].values x = np.arange(window) coeffs = np.polyfit(x, recent_y, degree) poly = np.poly1d(coeffs) x_future = np.arange(window, window + len(steps)) y_future = poly(x_future) # Affichage de la fonction # print("Fonction polynomiale trouvée :") # print(poly) current = series.iloc[-1] count = 0 for future_step in steps: # range(1, n_future + 1) future_x = window - 1 + future_step prediction = poly(future_x) # series.loc[series.index[future_x], f'poly_pred_t+{future_step}'] = prediction # ➕ Afficher les prédictions # print(f"{current} → t+{future_step}: x={future_x}, y={prediction:.2f}") if prediction > 0: # current: count += 1 return poly, x_future, y_future, count # def calculateStats2(self, df, index, target): # # Nombre de tranches (modifiable) # n_bins_indice = 11 # n_bins_valeur = 11 # # # Tranches dynamiques # # df['indice_tranche'] = pd.qcut(df[f"{index}"], q=n_bins_indice, duplicates='drop') # # df['valeur_tranche'] = pd.qcut(df[f"{target}"], q=n_bins_valeur, duplicates='drop') # # df[f"{index}_bin"], bins_1h = pd.qcut(df[f"{index}"], q=n_bins_indice, labels=self.labels, retbins=True, # duplicates='drop') # df[f"{target}_bin"], bins_1d = pd.qcut(df[f"{target}"], q=n_bins_valeur, labels=self.labels, retbins=True, # duplicates='drop') # # Affichage formaté pour code Python # print(f"Bornes des quantiles pour {index} : [{', '.join([f'{b:.4f}' for b in bins_1h])}]") # print(f"Bornes des quantiles pour {target} : [{', '.join([f'{b:.4f}' for b in bins_1d])}]") # # # Tableau croisé (compte) # tableau = pd.crosstab(df[f"{index}_bin"], df[f"{target}_bin"]) # # # Facultatif : en pourcentages # tableau_pct = tableau.div(tableau.sum(axis=1), axis=0) * 100 # # # Affichage # print("Répartition brute :") # print(tableau) # print("\nRépartition en % par ligne :") # print(tableau_pct.round(2)) def calculateStats(self, df, index, target): # Nombre de tranches (modifiable) n_bins_indice = 11 n_bins_valeur = 11 # Créer les tranches dynamiques df['indice_tranche'] = pd.qcut(df[index], q=n_bins_indice, duplicates='drop') df['valeur_tranche'] = pd.qcut(df[target], q=n_bins_valeur, duplicates='drop') # Créer un tableau croisé avec la moyenne des valeurs pivot_mean = df.pivot_table( index='indice_tranche', columns='valeur_tranche', values=target, # <-- c'est la colonne qu'on agrège aggfunc='mean' # <-- on calcule la moyenne ) # Résultat print("Moyenne des valeurs par double-tranche :") print(pivot_mean.round(2)) def should_enter_trade(self, pair: str, last_candle) -> bool: limit = 3 if pair.startswith('BTC'): return True # BTC toujours autorisé # Filtrer les paires non-BTC non_btc_pairs = [p for p in self.pairs if not p.startswith('BTC')] # Compter les positions actives sur les paires non-BTC max_nb_trades = 0 total_non_btc = 0 max_pair = '' for p in non_btc_pairs: max_nb_trades = max(max_nb_trades, self.pairs[p]['count_of_buys']) if (max_nb_trades == self.pairs[p]['count_of_buys'] and max_nb_trades > limit): max_pair = p total_non_btc += self.pairs[p]['count_of_buys'] pct_max = self.getPctLastBuy(pair, last_candle) # val = self.getProbaHausse(last_candle) # if (val < 40): # return False # if count_decrease == len(non_btc_pairs): # self.should_enter_trade_count += 1 # char="." # print(f"should_enter_trade canceled all pairs decreased {'':{char}>{self.should_enter_trade_count}}") # return False self.should_enter_trade_count = 0 if max_pair != '': return max_pair == pair or pct_max < - 0.25 else: return True