diff --git a/.gitignore b/.gitignore
index 20e39fd..fafe920 100644
--- a/.gitignore
+++ b/.gitignore
@@ -81,6 +81,9 @@ target/
 # Jupyter Notebook
 .ipynb_checkpoints
 
+# Test notebooks
+/experiments/notebooks/test_notebook.ipynb
+
 # IPython
 profile_default/
 ipython_config.py
diff --git a/experiments/default_experiment.py b/experiments/default_experiment.py
index 1bf31b3..a2258b1 100644
--- a/experiments/default_experiment.py
+++ b/experiments/default_experiment.py
@@ -24,5 +24,5 @@
 # Configure Simulation & Experiment engine
 simulation.engine = experiment.engine
 experiment.engine.backend = Backend.SINGLE_PROCESS
-experiment.engine.deepcopy = False
+experiment.engine.deepcopy = False
 experiment.engine.drop_substeps = True
diff --git a/experiments/notebooks/1_model_validation.ipynb b/experiments/notebooks/1_model_validation.ipynb
index 92db079..d470f92 100644
--- a/experiments/notebooks/1_model_validation.ipynb
+++ b/experiments/notebooks/1_model_validation.ipynb
@@ -596,9 +596,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python (CADLabs Ethereum Economic Model)",
    "language": "python",
-   "name": "python3"
+   "name": "python-cadlabs-eth-model"
   },
   "language_info": {
    "codemirror_mode": {
@@ -610,7 +610,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.11"
+   "version": "3.8.2"
   }
  },
  "nbformat": 4,
diff --git a/experiments/notebooks/2_validator_revenue_and_profit_yields.ipynb b/experiments/notebooks/2_validator_revenue_and_profit_yields.ipynb
index 844bbfd..c59befd 100644
--- a/experiments/notebooks/2_validator_revenue_and_profit_yields.ipynb
+++ b/experiments/notebooks/2_validator_revenue_and_profit_yields.ipynb
@@ -176,6 +176,16 @@
     "df_1a, _exceptions = run(simulation_1a)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25560f38",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df_1a.query(\"number_of_validators_in_activation_queue == 0\")"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -478,7 +488,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.11"
+   "version": "3.8.5"
   }
  },
  "nbformat": 4,
diff --git a/experiments/notebooks/3_network_issuance_and_inflation_rate.ipynb b/experiments/notebooks/3_network_issuance_and_inflation_rate.ipynb
index 714f42c..2735e68 100644
--- a/experiments/notebooks/3_network_issuance_and_inflation_rate.ipynb
+++ b/experiments/notebooks/3_network_issuance_and_inflation_rate.ipynb
@@ -245,9 +245,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python (CADLabs Ethereum Economic Model)",
    "language": "python",
-   "name": "python3"
+   "name": "python-cadlabs-eth-model"
   },
   "language_info": {
    "codemirror_mode": {
@@ -259,7 +259,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.11"
+   "version": "3.8.2"
   }
  },
  "nbformat": 4,
diff --git a/experiments/notebooks/4_validator_pool_compounding.ipynb b/experiments/notebooks/4_validator_pool_compounding.ipynb
new file mode 100644
index 0000000..90af800
--- /dev/null
+++ b/experiments/notebooks/4_validator_pool_compounding.ipynb
@@ -0,0 +1,587 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Experiment Notebook: Compounding Yields for Pool Validators (Model Extension 5 - WIP)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Table of Contents\n",
+    "* [Experiment Summary](#Experiment-Summary)\n",
+    "* [Experiment Assumptions](#Experiment-Assumptions)\n",
+    "* [Experiment Setup](#Experiment-Setup)\n",
+    "* [Analysis 1: Extended Time-domain](#Analysis-1:-Extended-Time-domain)\n",
+    "* [Analysis 2: Sweep of Pool Size](#Analysis-2:-Sweep-of-Pool-Size)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Experiment Summary\n",
+    "\n",
+    "Each discrete validator requires a 32 ETH deposit when initialized. A validator's effective balance – the value used to calculate validator rewards – is capped at 32 ETH. Any rewards a validator earns above and beyond the 32 ETH requirement do not contribute to their yields until they accrue an additional 32 ETH and create another validator instance. This prevents a solo validator from reinvesting their yields to receive compound interest.\n",
+    "\n",
+    "On the other hand, stakers that utilise validator pools, on exchanges for example, can compound their returns by pooling the returns of multiple validators to initialize another validator with 32 ETH. The pooling of returns and initialization of a shared validator effectively results in compound interest for those utilising staking pools, potentially resulting in much higher yields, especially over longer periods of time, than those of solo / distributed validators.\n",
+    "\n",
+    "The following experiment notebook investigates ...\n",
+    "\n",
+    "\n",
+    "# Experiment Assumptions\n",
+    "\n",
+    "* AVG Pool Size captures the ***initial, average pool size*** across all pool environments\n",
+    "\n",
+    "* In order to ensure consistent analysis of the effect of 'average pool size', new validators initialised externally to pools (i.e. from 'validator_process') assemble new pools as opposed to joining existing pools. Consequently, pool sizes grow only when new shared validators are initialized.\n",
+    "\n",
+    "* Pooling begins simultaneously across all pools. Because pool sizes are captured as an average, new shared validator instances are initialised simultaneously across all pools. This leads to sudden 'jumps' in new shared validator instances as pools accrue the target stake amount at the same time. In reality, such jumps are unlikely to occur due to variations across validator pool environments.\n",
+    "\n",
+    "* The current implementation assumes all pool validators engage in pool yield compounding. The model could include a parameter accounting for the fraction of validator pools engaging in pool compounding once more data is known.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Experiment Setup\n",
+    "\n",
+    "We begin with several experiment-notebook-level preparatory setup operations:\n",
+    "\n",
+    "* Import relevant dependencies\n",
+    "* Import relevant experiment templates\n",
+    "* Create copies of experiments\n",
+    "* Configure and customize experiments"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import the setup module:\n",
+    "# * sets up the Python path\n",
+    "# * runs shared notebook configuration methods, such as loading IPython modules\n",
+    "import setup\n",
+    "\n",
+    "import copy\n",
+    "import logging\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "import plotly.express as px\n",
+    "\n",
+    "import experiments.notebooks.visualizations as visualizations\n",
+    "from experiments.run import run\n",
+    "from experiments.utils import display_code\n",
+    "from model.types import Stage\n",
+    "from model.constants import epochs_per_day, epochs_per_week, epochs_per_month\n",
+    "from model.state_variables import validator_count_distribution\n",
+    "from model.system_parameters import pool_validator_indeces"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Enable/disable logging\n",
+    "logger = logging.getLogger()\n",
+    "logger.disabled = False"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import experiment templates\n",
+    "import experiments.templates.time_domain_analysis as time_domain_analysis\n",
+    "import experiments.templates.pool_size_sweep_analysis as pool_size_sweep_analysis"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Analysis 1: Extended Time-domain"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "(Simulate the model over a 10-year period and plot relevant metrics.)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "experiment = time_domain_analysis.experiment\n",
+    "experiment.engine.deepcopy = True\n",
+    "simulation_1 = copy.deepcopy(time_domain_analysis.experiment.simulations[0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Experiment configuration:\n",
+    "DELTA_TIME = epochs_per_week  # epochs per timestep\n",
+    "\n",
+    "SIMULATION_TIME_MONTHS = 10 * 12\n",
+    "TIMESTEPS = epochs_per_month * SIMULATION_TIME_MONTHS // DELTA_TIME\n",
+    "\n",
+    "simulation_1.timesteps = TIMESTEPS\n",
+    "\n",
+    "normal_adoption = simulation_1.model.params['validator_process'][0](_run=None, _timestep=None)\n",
+    "\n",
+    "simulation_1.model.params.update({\n",
+    "    \"dt\": [DELTA_TIME],  # (default: per week)\n",
+    "    \"stage\": [Stage.ALL],\n",
+    "    \"avg_pool_size\": [1, 10, 100, 1000],  # AVG initial pool size\n",
+    "    \"eth_price_process\": [lambda _run, _timestep: 3000],\n",
+    "    'validator_process': [lambda _run, _timestep: normal_adoption * 1],\n",
+    "})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Calculate initial number of pools (derived from 'avg_pool_size' parameter list)\n",
+    "\n",
+    "avg_pool_size_list = simulation_1.model.params['avg_pool_size']\n",
"nValidatorEnvironments = len(validator_count_distribution)\n", + "number_of_pools_list = np.zeros((len(avg_pool_size_list), nValidatorEnvironments))\n", + "\n", + "\n", + "for i in range(len(avg_pool_size_list)): \n", + " for y in range(nValidatorEnvironments):\n", + " if y in pool_validator_indeces:\n", + " number_of_pools_list[i][y] = np.round(validator_count_distribution[y] / avg_pool_size_list[i])\n", + "\n", + " \n", + "simulation_1.model.params.update({\"number_of_pools\": number_of_pools_list})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Experiment execution\n", + "df_1, exceptions = run(simulation_1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualizations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### To Do:\n", + "* Create visualization plots in __init__.py\n", + "* Label all x-axis time as Date\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Validator Pools Metrics" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# AVG Annualized Daily Profit Yields (%) per pool\n", + "\n", + "px.line(\n", + " df_1,\n", + " x='timestamp',\n", + " y=['diy_hardware_profit_yields_pct', 'diy_cloud_profit_yields_pct','pool_staas_pool_profit_yields_pct', 'pool_hardware_pool_profit_yields_pct', 'pool_cloud_pool_profit_yields_pct'],\n", + " animation_frame='avg_pool_size',\n", + " title='AVG Profit Yields (%) per pool'\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# AVG Annualized Daily Profit Yields (%) per pool\n", + "\n", + "px.line(\n", + " df_1,\n", + " x='timestamp',\n", + " y=['diy_hardware_profit_yields_pct', 'pool_cloud_pool_profit_yields_pct'],\n", + " animation_frame='avg_pool_size',\n", + " title='AVG Profit Yields (%) per pool'\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cumulative Profit Yields \n", + "\n", + "px.line(\n", + " df_1,\n", + " x='timestamp',\n", + " y=['diy_hardware_cumulative_profit_yields_pct', 'diy_cloud_cumulative_profit_yields_pct', 'pool_staas_pool_cumulative_profit_yields_pct', 'pool_hardware_pool_cumulative_profit_yields_pct', 'pool_cloud_pool_cumulative_profit_yields_pct'],\n", + " animation_frame='avg_pool_size',\n", + " title='Cumulative Profit Yields (%) per pool'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cumulative Profit Yields \n", + "\n", + "px.line(\n", + " df_1,\n", + " x='timestamp',\n", + " y=['diy_hardware_cumulative_profit_yields_pct', 'pool_cloud_pool_cumulative_profit_yields_pct'],\n", + " animation_frame='avg_pool_size',\n", + " title='Cumulative Profit Yields (%) per pool'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Shared Validators per Pool\n", + "\n", + "px.line(\n", + " df_1,\n", + " x='timestamp',\n", + " y=['pool_staas_shared_validators_per_pool', 'pool_hardware_shared_validators_per_pool', 'pool_cloud_shared_validators_per_pool'],\n", + " animation_frame='avg_pool_size'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# AVG ETH STAKED per pool\n", + "\n", + "# px.line(\n", + "# df_1,\n", + "# 
+    "#     x='timestamp',\n",
+    "#     y=['pool_staas_pool_eth_staked', 'pool_hardware_pool_eth_staked', 'pool_cloud_pool_eth_staked'],\n",
+    "#     animation_frame='avg_pool_size'\n",
+    "# )\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Pool size\n",
+    "\n",
+    "# px.line(\n",
+    "#     df_1,\n",
+    "#     x='timestamp',\n",
+    "#     y=['pool_staas_pool_size', 'pool_hardware_pool_size', 'pool_cloud_pool_size'],\n",
+    "#     animation_frame='avg_pool_size'\n",
+    "# )\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# AVG Profit (USD) per pool\n",
+    "\n",
+    "# px.line(\n",
+    "#     df_1,\n",
+    "#     x='timestamp',\n",
+    "#     y=['pool_staas_pool_profit', 'pool_hardware_pool_profit', 'pool_cloud_pool_profit'],\n",
+    "#     animation_frame='avg_pool_size'\n",
+    "# )\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Environment-level Metrics"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Total Validators"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# px.line(\n",
+    "#     df_1,\n",
+    "#     x='timestamp',\n",
+    "#     y=['pool_staas_validator_count', 'pool_hardware_validator_count', 'pool_cloud_validator_count', 'diy_hardware_validator_count', 'diy_cloud_validator_count'],\n",
+    "#     animation_frame='avg_pool_size'\n",
+    "# )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Shared Validators"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# px.line(\n",
+    "#     df_1,\n",
+    "#     x='timestamp',\n",
+    "#     y=['pool_staas_shared_validators', 'pool_hardware_shared_validators', 'pool_cloud_shared_validators', 'diy_hardware_shared_validators', 'diy_cloud_shared_validators'],\n",
+    "#     animation_frame='avg_pool_size'\n",
+    "# )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### ETH Staked"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# px.line(\n",
+    "#     df_1,\n",
+    "#     x='timestamp',\n",
+    "#     y=['pool_staas_eth_staked', 'pool_hardware_eth_staked', 'pool_cloud_eth_staked', 'diy_hardware_eth_staked', 'staas_full_eth_staked'],\n",
+    "#     animation_frame='avg_pool_size'\n",
+    "# )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Analysis 2: Sweep of Pool Size"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Phase-space analysis showing metrics as a function of the average pool size for pool validators using pool compounding.\n",
+    "\n",
+    "* In order to accurately account for the compounding of pool validator yields over time, we first simulate the model over the desired time-horizon.\n",
+    "* Then, we perform a phase-space analysis at the desired timestep (e.g. at year 5)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Analysis-specific setup"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Fetch the pool-size sweep analysis experiment\n",
+    "experiment = pool_size_sweep_analysis.experiment\n",
+    "experiment.engine.deepcopy = True\n",
+    "# Create a copy of the experiment simulation\n",
+    "simulation_2 = copy.deepcopy(experiment.simulations[0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Experiment configuration\n",
+    "\n",
+    "# Note: to change the default 'initial AVG pool size' samples,\n",
+    "# see 'pool_size_sweep_analysis.py' located in experiments/templates\n",
+    "DELTA_TIME = epochs_per_day  # epochs per timestep (determines compounding period)\n",
+    "SIMULATION_TIME_MONTHS = 5 * 12  # number of months\n",
+    "TIMESTEPS = epochs_per_month * SIMULATION_TIME_MONTHS // DELTA_TIME\n",
+    "\n",
+    "normal_adoption = simulation_2.model.params['validator_process'][0](_run=None, _timestep=None)\n",
+    "\n",
+    "simulation_2.model.params.update({\n",
+    "    \"dt\": [DELTA_TIME],  # determines compounding period (default: per day)\n",
+    "    \"validator_process\": [lambda _run, _timestep: normal_adoption * 1],  # New validators per epoch\n",
+    "    \"stage\": [Stage.ALL],\n",
+    "    \"eth_price_process\": [lambda _run, _timestep: 3000],\n",
+    "})\n",
+    "\n",
+    "\n",
+    "# Set time horizon:\n",
+    "YEARS = 5\n",
+    "TIMESTEP_ANALYSIS = YEARS * 12  # convert years to months"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Experiment execution\n",
+    "df_2, exceptions = run(simulation_2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualizations"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### To Do\n",
+    "* Set time analysis point in labels / headings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# To plot a specific point in time without having to re-run the simulation,\n",
+    "# set TIMESTEP_ANALYSIS below and re-run the following cells.\n",
+    "\n",
+    "YEAR = 3\n",
+    "TIMESTEP_ANALYSIS = YEAR * 12  # convert year to month"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# visualizations.plot_pool_profit_over_pool_size(df_2, TIMESTEP_ANALYSIS)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "visualizations.plot_pool_profit_yields_over_pool_size(df_2, TIMESTEP_ANALYSIS)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# visualizations.plot_pool_cumulative_yields_over_pool_size(df_2, TIMESTEP_ANALYSIS, 2)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/experiments/notebooks/template.ipynb b/experiments/notebooks/template.ipynb index 84cd07c..8189fa8 100644 --- a/experiments/notebooks/template.ipynb +++ b/experiments/notebooks/template.ipynb @@ -181,7 +181,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.11" + "version": "3.8.5" } }, "nbformat": 4, diff --git a/experiments/notebooks/visualizations/__init__.py b/experiments/notebooks/visualizations/__init__.py index 6e72a3c..955812e 100644 --- a/experiments/notebooks/visualizations/__init__.py +++ b/experiments/notebooks/visualizations/__init__.py @@ -55,6 +55,27 @@ for validator in validator_environments ] ), + # @Ross + **dict( + [ + ( + validator.type + "_validator_count", + validator_environment_name_mapping[validator.type], + ) + for validator in validator_environments + ] + ), + # @Ross + **dict( + [ + ( + validator.type + "_profit", + validator_environment_name_mapping[validator.type], + ) + for validator in validator_environments + ] + ) + } axis_state_variable_name_mapping = { @@ -1507,6 +1528,63 @@ def plot_profit_yields_by_environment_over_time(df): return fig +def plot_validator_count_by_environment_over_time(df): + validator_count = [ + validator.type + "_validator_count" for validator in validator_environments + ] + + fig = go.Figure() + + for key in validator_count: + fig.add_trace( + go.Scatter( + x=df["timestamp"], + y=df[key], + name=legend_state_variable_name_mapping[key], + ) + ) + + fig.update_layout( + title="Validator Count by Environment Over Time", + xaxis_title="Date", + yaxis_title="Count", + legend_title="", + xaxis=dict(rangeslider=dict(visible=True), type="date"), + hovermode="x unified", + ) + + return fig + + +def plot_profit_by_environment_over_time(df): + validator_profit = [ + validator.type + "_profit" for validator in validator_environments + ] + + fig = go.Figure() + + for key in validator_profit: + fig.add_trace( + go.Scatter( + x=df["timestamp"], + y=df[key], + name=legend_state_variable_name_mapping[key], + ) + ) + + fig.update_layout( + title="Profit by Environment Over Time", + xaxis_title="Date", + yaxis_title="Profit", + legend_title="", + xaxis=dict(rangeslider=dict(visible=True), type="date"), + hovermode="x unified", + ) + + return fig + + + def plot_network_issuance_scenarios(df, simulation_names): df = df.set_index("timestamp", drop=False) @@ -1584,3 +1662,220 @@ def plot_network_issuance_scenarios(df, simulation_names): ) return fig + + + + +# Plots for pool compounding experiment notebook + + +def plot_eth_staked_over_pool_size(df, TIMESTEP_ANALYSIS): + fig = go.Figure() + + df_subset_0 = df.query("timestep == " + str(TIMESTEP_ANALYSIS)) + + # Add traces + # Pool environment(s) + fig.add_trace( + go.Scatter( + x=df_subset_0.avg_pool_size, + y=df_subset_0.pool_staas_eth_staked, + name=f"Pool Staas @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH", + line=dict(color=cadlabs_colorway_sequence[1]), + ), + ) + fig.add_trace( + go.Scatter( + x=df_subset_0.avg_pool_size, + y=df_subset_0.pool_hardware_eth_staked, + name=f"Pool Hardware @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH", + line=dict(color=cadlabs_colorway_sequence[2]), + ), + ) + fig.add_trace( + go.Scatter( + x=df_subset_0.avg_pool_size, + y=df_subset_0.pool_cloud_eth_staked, + name=f"Pool Cloud @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH", + line=dict(color=cadlabs_colorway_sequence[3]), + ), + ) + # Non-pool 
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.diy_hardware_eth_staked,
+            name=f"DIY Hardware @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[4], dash="dash"),
+        ),
+    )
+
+    update_legend_names(fig)
+
+    fig.update_layout(
+        title="ETH Staked over AVG Pool Size",
+        xaxis_title="AVG Pool Size",
+        legend_title="Validator Environments",
+    )
+
+    # Set y-axis title
+    fig.update_yaxes(title_text="ETH Staked (ETH)")
+    fig.update_layout(hovermode="x unified")
+
+    return fig
+
+
+def plot_pool_profit_over_pool_size(df, TIMESTEP_ANALYSIS):
+    fig = go.Figure()
+
+    df_subset_0 = df.query("timestep == " + str(TIMESTEP_ANALYSIS))
+
+    # Add traces
+    # Pool environment(s)
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.pool_staas_pool_profit,
+            name=f"Pool Staas - Profit @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[1]),
+        ),
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.pool_hardware_pool_profit,
+            name=f"Pool Hardware - Profit @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[2]),
+        ),
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.pool_cloud_pool_profit,
+            name=f"Pool Cloud - Profit @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[3]),
+        ),
+    )
+
+    update_legend_names(fig)
+
+    fig.update_layout(
+        title="AVG Pool Profit over Initial Pool Size",
+        xaxis_title="AVG Initial Pool Size",
+        legend_title="Validator Environments",
+    )
+
+    # Set y-axis title
+    fig.update_yaxes(title_text="Profit (USD)")
+    fig.update_layout(hovermode="x unified")
+
+    return fig
+
+
+def plot_pool_profit_yields_over_pool_size(df, TIMESTEP_ANALYSIS):
+    fig = go.Figure()
+    yearString = str(TIMESTEP_ANALYSIS / 12)
+    df_subset_0 = df.query("timestep == " + str(TIMESTEP_ANALYSIS))
+
+    # Add traces
+    # Pool environment(s)
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.pool_staas_pool_profit_yields_pct,
+            name=f"Pool Staas @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[1]),
+        ),
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.pool_hardware_pool_profit_yields_pct,
+            name=f"Pool Hardware @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[2]),
+        ),
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset_0.avg_pool_size,
+            y=df_subset_0.pool_cloud_pool_profit_yields_pct,
+            name=f"Pool Cloud @ {df_subset_0.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[3]),
+        ),
+    )
+
+    update_legend_names(fig)
+
+    fig.update_layout(
+        title="AVG Annualized Pool Profit Yields over Initial Pool Size",
+        xaxis_title="AVG Initial Pool Size",
+        legend_title="Validator Environments",
+    )
+
+    # Set y-axis title
+    fig.update_yaxes(title_text="Profit Yields (%/Year)")
+    fig.update_layout(hovermode="x unified")
+
+    return fig
+
+
+def plot_pool_cumulative_yields_over_pool_size(df, TIMESTEP_ANALYSIS, poolSize):
+    fig = go.Figure()
+    yearString = str(TIMESTEP_ANALYSIS / 12)
+    df_subset = df.query("timestep == " + str(TIMESTEP_ANALYSIS))
+
+    # Add traces
+    # Pool environment(s)
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset.avg_pool_size,
+            y=df_subset.pool_staas_pool_cumulative_profit_yields_pct,
+            name=f"Pool Staas @ {df_subset.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[1]),
+        ),
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset.avg_pool_size,
+            y=df_subset.pool_hardware_pool_cumulative_profit_yields_pct,
+            name=f"Pool Hardware @ {df_subset.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[2]),
+        ),
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset.avg_pool_size,
+            y=df_subset.pool_cloud_pool_cumulative_profit_yields_pct,
+            name=f"Pool Cloud @ {df_subset.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[3]),
+        ),
+    )
+    # Non-pool environment(s)
+    fig.add_trace(
+        go.Scatter(
+            x=df_subset.avg_pool_size,
+            y=df_subset.diy_hardware_cumulative_profit_yields_pct,
+            name=f"DIY Hardware @ {df_subset.eth_price.iloc[0]:.0f} USD/ETH",
+            line=dict(color=cadlabs_colorway_sequence[4], dash="dash"),
+        ),
+    )
+
+    update_legend_names(fig)
+
+    fig.update_layout(
+        title="AVG Cumulative Annualized Pool Profit Yields over Initial Pool Size",
+        xaxis_title="AVG Initial Pool Size",
+        legend_title="Validator Environments",
+    )
+
+    # Set y-axis title
+    fig.update_yaxes(title_text="Profit Yields (%/Year)")
+    fig.update_layout(hovermode="x unified")
+
+    return fig
\ No newline at end of file
diff --git a/experiments/outputs/validator_environment_yield_contour.png b/experiments/outputs/validator_environment_yield_contour.png
index efabb7d..087d99c 100644
Binary files a/experiments/outputs/validator_environment_yield_contour.png and b/experiments/outputs/validator_environment_yield_contour.png differ
diff --git a/experiments/post_processing.py b/experiments/post_processing.py
index f6498bc..3cb4971 100644
--- a/experiments/post_processing.py
+++ b/experiments/post_processing.py
@@ -21,9 +21,14 @@ def post_process(df: pd.DataFrame, drop_timestep_zero=True, parameters=parameter
     # Assign parameters to DataFrame
     assign_parameters(df, parameters, [
         # Parameters to assign to DataFrame
-        'dt'
+        'dt',
+        'avg_pool_size'
     ])
 
+    df[[validator.type + '_eth_staked' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_eth_staked), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_pool_eth_staked' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_pool_eth_staked), axis=1, result_type='expand').astype('float32')
+
+
     # Dissagregate validator count
     df[[validator.type + '_validator_count' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_count_distribution), axis=1, result_type='expand').astype('float32')
 
@@ -42,21 +47,37 @@
     df[['individual_validator_' + validator.type + '_costs' for validator in validator_environments]] = \
         df[[validator.type + '_costs' for validator in validator_environments]].rename(columns=_mapping) / \
         df[[validator.type + '_validator_count' for validator in validator_environments]]
+    df[[validator.type + '_shared_validators' for validator in validator_environments]] = df.apply(lambda row: list(row.number_of_shared_validators), axis=1, result_type='expand').astype('float32')
 
     # Dissagregate revenue and profit
     df[[validator.type + '_revenue' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_revenue), axis=1, result_type='expand').astype('float32')
     df[[validator.type + '_profit' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_profit), axis=1, result_type='expand').astype('float32')
+
+    # Disaggregate pool metrics
+    df[[validator.type + '_pool_profit' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_pool_profit), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_pool_profit_yields' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_pool_profit_yields), axis=1, result_type='expand').astype('float32')
+    # df[[validator.type + '_pool_cumulative_yields' for validator in validator_environments]] = df.apply(lambda row: list(row.pool_cumulative_yields), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_stakers_per_pool' for validator in validator_environments]] = df.apply(lambda row: list(row.stakers_per_pool), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_shared_validators_per_pool' for validator in validator_environments]] = df.apply(lambda row: list(row.shared_validators_per_pool), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_pool_size' for validator in validator_environments]] = df.apply(lambda row: list(row.pool_size), axis=1, result_type='expand').astype('float32')
+
     # Dissagregate yields
     df[[validator.type + '_revenue_yields' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_revenue_yields), axis=1, result_type='expand').astype('float32')
     df[[validator.type + '_profit_yields' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_profit_yields), axis=1, result_type='expand').astype('float32')
+
     # Convert decimals to percentages
     df[[validator.type + '_revenue_yields_pct' for validator in validator_environments]] = df[[validator.type + '_revenue_yields' for validator in validator_environments]] * 100
     df[[validator.type + '_profit_yields_pct' for validator in validator_environments]] = df[[validator.type + '_profit_yields' for validator in validator_environments]] * 100
+
+    df[[validator.type + '_pool_profit_yields_pct' for validator in validator_environments]] = df[[validator.type + '_pool_profit_yields' for validator in validator_environments]] * 100
+    # df[[validator.type + '_pool_cumulative_yields_pct' for validator in validator_environments]] = df[[validator.type + '_pool_cumulative_yields' for validator in validator_environments]] * 100
+
     df['supply_inflation_pct'] = df['supply_inflation'] * 100
     df['total_revenue_yields_pct'] = df['total_revenue_yields'] * 100
     df['total_profit_yields_pct'] = df['total_profit_yields'] * 100
+
     # Calculate revenue-profit yield spread
     df['revenue_profit_yield_spread_pct'] = df['total_revenue_yields_pct'] - df['total_profit_yields_pct']
 
@@ -83,9 +104,20 @@
     # Calculate cumulative revenue and profit yields
     df["daily_revenue_yields_pct"] = df["total_revenue_yields_pct"] / (constants.epochs_per_year / df['dt'])
     df["cumulative_revenue_yields_pct"] = df.groupby('subset')["daily_revenue_yields_pct"].transform('cumsum')
+
     df["daily_profit_yields_pct"] = df["total_profit_yields_pct"] / (constants.epochs_per_year / df['dt'])
     df["cumulative_profit_yields_pct"] = df.groupby('subset')["daily_profit_yields_pct"].transform('cumsum')
 
+    df['validator_daily_profit_yield_pct'] = df['validator_profit_yields'] / (constants.epochs_per_year / df['dt']) * 100
+    df[[validator.type + '_daily_profit_yields_pct' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_daily_profit_yield_pct), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_cumulative_profit_yields_pct' for validator in validator_environments]] = df.groupby('subset')[[validator.type + '_daily_profit_yields_pct' for validator in validator_environments]].transform('cumsum')
+
+    # AVG Cumulative yields per pool
+    df['validator_pool_daily_profit_yield_pct'] = df['validator_pool_profit_yields'] / (constants.epochs_per_year / df['dt']) * 100
+    df[[validator.type + '_pool_daily_profit_yields_pct' for validator in validator_environments]] = df.apply(lambda row: list(row.validator_pool_daily_profit_yield_pct), axis=1, result_type='expand').astype('float32')
+    df[[validator.type + '_pool_cumulative_profit_yields_pct' for validator in validator_environments]] = df.groupby('subset')[[validator.type + '_pool_daily_profit_yields_pct' for validator in validator_environments]].transform('cumsum')
+
+
     # Drop the initial state for plotting
     if drop_timestep_zero:
         df = df.drop(df.query('timestep == 0').index)
diff --git a/experiments/templates/pool_size_sweep_analysis.py b/experiments/templates/pool_size_sweep_analysis.py
new file mode 100644
index 0000000..2c3e255
--- /dev/null
+++ b/experiments/templates/pool_size_sweep_analysis.py
@@ -0,0 +1,57 @@
+"""
+# Pool Size Sweep Analysis
+
+Creates a parameter sweep of the avg_pool_size parameter.
+"""
+
+import numpy as np
+import copy
+
+import model.constants as constants
+from experiments.default_experiment import experiment
+from model.system_parameters import pool_validator_indeces
+from model.state_variables import validator_count_distribution
+
+# Make a copy of the default experiment to avoid mutation
+experiment = copy.deepcopy(experiment)
+
+DELTA_TIME = constants.epochs_per_day  # epochs per timestep
+SIMULATION_TIME_MONTHS = 5 * 12  # number of months
+TIMESTEPS = constants.epochs_per_month * SIMULATION_TIME_MONTHS // DELTA_TIME
+
+pool_size_samples = np.linspace(
+    1,
+    200,
+    10,
+    dtype=int
+)
+
+
+# Calculate initial number of pools (derived from 'avg_pool_size' parameter list)
+
+nValidatorEnvironments = len(validator_count_distribution)
+number_of_pools_list = np.zeros((len(pool_size_samples), nValidatorEnvironments))
+
+for i in range(len(pool_size_samples)):
+    for y in range(nValidatorEnvironments):
+        if y in pool_validator_indeces:
+            number_of_pools_list[i][y] = np.round(validator_count_distribution[y] / pool_size_samples[i])
+
+
+parameter_overrides = {
+    "avg_pool_size": pool_size_samples,
+    "number_of_pools": number_of_pools_list,
+    "eth_price_process": [
+        lambda _run, _timestep: 2000,
+    ]
+}
+
+# Override default experiment parameters
+experiment.simulations[0].model.params.update(parameter_overrides)
+# Set runs
+experiment.simulations[0].runs = 1
+# Set the number of timesteps, and set the unit of time `dt` to multiple epochs
+experiment.simulations[0].timesteps = TIMESTEPS
+experiment.simulations[0].model.params.update({"dt": [DELTA_TIME]})
diff --git a/model/constants.py b/model/constants.py
index 44fc1f8..aabdf0b 100644
--- a/model/constants.py
+++ b/model/constants.py
@@ -6,6 +6,7 @@
 wei = 1e18
 slots_per_epoch = 32
 epochs_per_day = 225
+epochs_per_week = 1575
 epochs_per_month = 6750
 epochs_per_year = 82180
 pow_blocks_per_epoch = 32.0 * 12 / 13
diff --git a/model/parts/system_metrics.py b/model/parts/system_metrics.py
index c456d83..67053de 100644
--- a/model/parts/system_metrics.py
+++ b/model/parts/system_metrics.py
@@ -5,9 +5,12 @@
 """
 
 import typing
+import numpy as np
 
 import model.constants as constants
-from model.types import Percentage, Gwei
+from model.types import Percentage, Gwei, ETH
+
+from model.system_parameters import validator_environments
 
 
 def policy_validator_costs(
@@ -19,7 +22,7 @@ def policy_validator_costs(
     """
     # Parameters
     dt = params["dt"]
-    validator_percentage_distribution = params["validator_percentage_distribution"]
+
     validator_hardware_costs_per_epoch = params["validator_hardware_costs_per_epoch"]
     validator_cloud_costs_per_epoch = params["validator_cloud_costs_per_epoch"]
     validator_third_party_costs_per_epoch = params[
@@ -30,11 +33,10 @@ def policy_validator_costs(
     eth_price = previous_state["eth_price"]
     number_of_validators = previous_state["number_of_active_validators"]
     total_online_validator_rewards = previous_state["total_online_validator_rewards"]
-
-    # Calculate hardware, cloud, and third-party costs per validator type
-    validator_count_distribution = (
-        number_of_validators * validator_percentage_distribution
-    )
+    validator_percentage_distribution = previous_state[
+        "validator_percentage_distribution"
+    ]
+    validator_count_distribution = previous_state["validator_count_distribution"]
 
     validator_hardware_costs = (
         validator_count_distribution * validator_hardware_costs_per_epoch * dt
@@ -59,7 +61,6 @@ def policy_validator_costs(
     total_network_costs = validator_costs.sum(axis=0)
 
     return {
-        "validator_count_distribution": validator_count_distribution,
         "validator_hardware_costs": validator_hardware_costs,
        "validator_cloud_costs": validator_cloud_costs,
         "validator_third_party_costs": validator_third_party_costs,
@@ -77,7 +78,6 @@ def policy_validator_yields(
     """
     # Parameters
     dt = params["dt"]
-    validator_percentage_distribution = params["validator_percentage_distribution"]
 
     # State Variables
     eth_price = previous_state["eth_price"]
@@ -85,8 +85,11 @@ def policy_validator_yields(
     validator_costs = previous_state["validator_costs"]
     total_network_costs = previous_state["total_network_costs"]
     total_online_validator_rewards = previous_state["total_online_validator_rewards"]
-    validator_count_distribution = previous_state["validator_count_distribution"]
     average_effective_balance = previous_state["average_effective_balance"]
+    validator_count_distribution = previous_state["validator_count_distribution"]
+    validator_percentage_distribution = previous_state[
+        "validator_percentage_distribution"
+    ]
 
     # Calculate ETH staked per validator type
     validator_eth_staked = validator_count_distribution * average_effective_balance
@@ -96,6 +99,7 @@ def policy_validator_yields(
     validator_revenue = (
         validator_percentage_distribution * total_online_validator_rewards
     )
+
     validator_revenue /= constants.gwei  # Convert from Gwei to ETH
     validator_revenue *= eth_price  # Convert from ETH to Dollars
@@ -139,6 +143,179 @@ def policy_validator_yields(
     }
 
 
+def policy_pool_yields(
+    params, substep, state_history, previous_state
+) -> typing.Dict[str, any]:
+    """
+    ## Pool-level Validator Yields Policy Function
+
+    Calculate the profit yields for pools across validator pool environments.
+ """ + + # Parameters + dt = params["dt"] + avg_pool_size = params["avg_pool_size"] + number_of_pools_param = params["number_of_pools"] + pool_validator_indeces = params["pool_validator_indeces"] + eth_price = previous_state["eth_price"] + + # State Variables + validator_count_distribution = previous_state["validator_count_distribution"] + validator_pools_available_profits_eth = previous_state[ + "validator_pools_available_profits_eth" + ] + number_of_shared_validators = previous_state["number_of_shared_validators"] + average_effective_balance = previous_state["average_effective_balance"] + + validator_pool_eth_staked = previous_state["validator_pool_eth_staked"] + validator_pool_profit = previous_state["validator_pool_profit"] + validator_pool_profit_yields = previous_state["validator_pool_profit_yields"] + number_of_pools = previous_state["number_of_pools"] + pool_size = previous_state["pool_size"] + validator_profit = previous_state["validator_profit"] + stakers_per_pool = previous_state["stakers_per_pool"] + shared_validators_per_pool = previous_state["shared_validators_per_pool"] + + if ( + avg_pool_size is not None and avg_pool_size > 0 + ): # returns true if analysis is investigating compounding yields (see model extension #5) + + # Use param value if state variable not yet determined + if number_of_pools.sum() == 0: + number_of_pools = number_of_pools_param + + shared_validators_eth_staked = number_of_shared_validators * ( + average_effective_balance / constants.gwei + ) + + stake_requirement = constants.eth_deposited_per_validator + + number_of_stakers = validator_count_distribution - number_of_shared_validators + + # Calculate pool sizes across environments (validators from validator process assemble new pools. See assumptions in experiment notebook #4): + number_of_pools = np.floor(number_of_stakers / avg_pool_size) + + for ( + i + ) in ( + pool_validator_indeces + ): # avoids division by zero where pooling does not apply + + stakers_per_pool[i] = number_of_stakers[i] / number_of_pools[i] + shared_validators_per_pool[i] = ( + number_of_shared_validators[i] / number_of_pools[i] + ) + + pool_size[i] = stakers_per_pool[i] + shared_validators_per_pool[i] + + # Calculate average ETH staked for a pool + validator_pool_eth_staked[i] = ( + validator_count_distribution[i] + / number_of_pools[i] + * average_effective_balance + / constants.gwei + ) + + # Calculate average profit per pool + validator_pool_profit[i] = validator_profit[i] / number_of_pools[i] + + # Calculate average profit yields per pool + initStaked = stakers_per_pool[i] * stake_requirement * eth_price + validator_pool_profit_yields[i] = (validator_pool_profit[i]) / initStaked + + validator_pool_profit_yields[i] *= ( + constants.epochs_per_year / dt + ) # Annualize value + + return { + "validator_pool_eth_staked": validator_pool_eth_staked, + "validator_pool_profit": validator_pool_profit, + "validator_pool_profit_yields": validator_pool_profit_yields, + "stakers_per_pool": stakers_per_pool, + "shared_validators_per_pool": shared_validators_per_pool, + "pool_size": pool_size, + } + + +def policy_shared_validators( + params, substep, state_history, previous_state +) -> typing.Dict[str, any]: + """ + ## Validator Pooled Returns Policy Function + A compounding mechanism to calculate new validator instances created by pooling returns in staking pools. + + See extension #5 in the 'model extension roadmap'. 
+ """ + + # Parameters + avg_pool_size = params["avg_pool_size"] + pool_validator_indeces = params["pool_validator_indeces"] + number_of_pools_param = params["number_of_pools"] + + # State Variables + eth_price = previous_state["eth_price"] + validator_profit = previous_state["validator_profit"] # (USD) + validator_pools_available_profits_eth = previous_state[ + "validator_pools_available_profits_eth" + ] + validator_count_distribution = previous_state["validator_count_distribution"] + number_of_shared_validators = previous_state["number_of_shared_validators"] + validator_percentage_distribution = previous_state[ + "validator_percentage_distribution" + ] + number_of_pools = previous_state["number_of_pools"] + + validator_costs = previous_state["validator_costs"] + total_online_validator_rewards = previous_state["total_online_validator_rewards"] + + # Constants & function variables + stake_requirement = constants.eth_deposited_per_validator + new_shared_validators = ( + 0 * previous_state["shared_validator_instances"] + ) # reset to zero + + validator_profit_eth = validator_profit / eth_price + + # Use param value if state variable not yet determined (i.e. at start of simulation) + if number_of_pools.sum(axis=0) == 0: + number_of_pools = number_of_pools_param + + if ( + avg_pool_size is not None and avg_pool_size > 0 + ): # avoid unnecessary computation if analysis is not investigating compounding + + for i in pool_validator_indeces: + + assert avg_pool_size < validator_count_distribution[i] + + # Calculate new shared validator instances initialized via pool compounding: + + # Aggregate existing profits, convert from USD to ETH + validator_pools_available_profits_eth[i] += validator_profit_eth[i] + avg_pool_profit = ( + validator_pools_available_profits_eth[i] / number_of_pools[i] + ) # Disaggregate profits to individual pool + new_shared_validators_per_pool = np.floor( + avg_pool_profit / stake_requirement + ) # Calculate new shared validators initialized by pool + + new_shared_validators[i] = ( + number_of_pools[i] * new_shared_validators_per_pool + ) # Aggregate + + validator_pools_available_profits_eth[i] -= ( + new_shared_validators[i] * stake_requirement + ) # Subtract the staked ammount from the available accumulated profits + + number_of_shared_validators += new_shared_validators + + return { + "validator_pools_available_profits_eth": validator_pools_available_profits_eth, + "shared_validator_instances": new_shared_validators, + "number_of_shared_validators": number_of_shared_validators, + } + + def policy_total_online_validator_rewards( params, substep, state_history, previous_state ) -> typing.Dict[str, Gwei]: diff --git a/model/parts/validators.py b/model/parts/validators.py index 91af942..e3ab93e 100644 --- a/model/parts/validators.py +++ b/model/parts/validators.py @@ -5,11 +5,13 @@ """ import typing +import numpy as np import model.constants as constants import model.parts.utils.ethereum_spec as spec from model.parts.utils import get_number_of_awake_validators from model.types import ETH, Gwei +from model.system_parameters import validator_environments def policy_staking( @@ -38,6 +40,7 @@ def policy_staking( if eth_staked_process(0, 0) is not None: # Get the ETH staked sample for the current run and timestep eth_staked = eth_staked_process(run, timestep * dt) + # Else, calculate from the number of validators else: eth_staked = number_of_validators * average_effective_balance / constants.gwei @@ -45,7 +48,9 @@ def policy_staking( # Assert expected conditions assert eth_staked 
 
-    return {"eth_staked": eth_staked}
+    return {
+        "eth_staked": eth_staked
+    }
 
 
 def policy_validators(params, substep, state_history, previous_state):
@@ -58,15 +63,20 @@ def policy_validators(params, substep, state_history, previous_state):
     eth_staked_process = params["eth_staked_process"]
     validator_process = params["validator_process"]
     validator_uptime_process = params["validator_uptime_process"]
+    avg_pool_size = params["avg_pool_size"]
+    validator_process_percentage_distribution = params["validator_percentage_distribution"]
 
     # State Variables
     run = previous_state["run"]
     timestep = previous_state["timestep"]
     number_of_active_validators = previous_state["number_of_active_validators"]
-    number_of_validators_in_activation_queue = previous_state[
-        "number_of_validators_in_activation_queue"
-    ]
+    number_of_validators_in_activation_queue = previous_state["number_of_validators_in_activation_queue"]
     average_effective_balance = previous_state["average_effective_balance"]
+    validator_count_distribution = previous_state["validator_count_distribution"]
+    validator_percentage_distribution = previous_state["validator_percentage_distribution"]
+    shared_validator_instances = previous_state["shared_validator_instances"]
+    validators_in_activation_queue = previous_state["validators_in_activation_queue"]
+
 
     # Calculate the number of validators using ETH staked
     if eth_staked_process(0, 0) is not None:
@@ -74,19 +84,70 @@ def policy_validators(params, substep, state_history, previous_state):
         number_of_active_validators = int(
             round(eth_staked / (average_effective_balance / constants.gwei))
         )
-    else:
-        new_validators_per_epoch = validator_process(run, timestep * dt)
-        number_of_validators_in_activation_queue += new_validators_per_epoch * dt
-
-        validator_churn_limit = (
-            spec.get_validator_churn_limit(params, previous_state) * dt
-        )
-        activated_validators = min(
-            number_of_validators_in_activation_queue, validator_churn_limit
+        validator_count_distribution = (
+            number_of_active_validators * validator_percentage_distribution
         )
-        number_of_active_validators += activated_validators
-        number_of_validators_in_activation_queue -= activated_validators
+    else:
+
+        # Calculate the number of validators using the validator process
+        if avg_pool_size is None or avg_pool_size < 1:  # True if the analysis is not investigating compounding yields (see model extension #5)
+            new_validators_per_epoch = validator_process(run, timestep * dt)
+            number_of_validators_in_activation_queue += new_validators_per_epoch * dt
+
+            validator_churn_limit = (
+                spec.get_validator_churn_limit(params, previous_state) * dt
+            )
+            activated_validators = min(
+                number_of_validators_in_activation_queue, validator_churn_limit
+            )
+
+            number_of_active_validators += activated_validators
+            number_of_validators_in_activation_queue -= activated_validators
+
+            validator_count_distribution = (
+                number_of_active_validators * validator_percentage_distribution
+            )
+
+        else:
+            # Aggregate new validators from both the validator process and
+            # the pool compounding mechanism (model extension #5):
+
+            # Update validator queue:
+            validators_from_validator_process = validator_process(run, timestep * dt) * dt
+            validators_from_validator_process_per_environment = np.round(validator_process_percentage_distribution * validators_from_validator_process).astype(int)
+            new_validators_distribution = validators_from_validator_process_per_environment + shared_validator_instances
+
+            validators_in_activation_queue += new_validators_distribution
+            number_of_validators_in_activation_queue = validators_in_activation_queue.sum(axis=0)
+
+            # Calculate churn limit and update active validator count:
+            validator_churn_limit = (
+                spec.get_validator_churn_limit(params, previous_state) * dt
+            )
+            activated_validators = min(
+                (number_of_validators_in_activation_queue), validator_churn_limit
+            )
+            number_of_active_validators += activated_validators
+
+            # Allocate validators accordingly:
+            new_validators_distribution_pct = (  # Calculate distribution percentage for new validators
+                new_validators_distribution / new_validators_distribution.sum(axis=0)
+            )
+            # Allocate new validators to respective validator environments
+            validator_count_distribution += (
+                new_validators_distribution_pct * activated_validators
+            ).astype(int)
+            # Determine new percentage distribution
+            validator_percentage_distribution = (
+                validator_count_distribution / number_of_active_validators
+            )
+            # Update the validator activation queue
+            validators_in_activation_queue -= np.round(new_validators_distribution_pct * activated_validators).astype(int)
+
 
     # Calculate the number of "awake" validators
     # See proposal: https://ethresear.ch/t/simplified-active-validator-cap-and-rotation-proposal
@@ -100,11 +161,15 @@ def policy_validators(params, substep, state_history, previous_state):
     # Assume a participation of more than 2/3 due to lack of inactivity leak mechanism
     assert validator_uptime >= 2 / 3, "Validator uptime must be greater than 2/3"
 
+
     return {
         "number_of_validators_in_activation_queue": number_of_validators_in_activation_queue,
+        "validators_in_activation_queue": validators_in_activation_queue,
         "number_of_active_validators": number_of_active_validators,
         "number_of_awake_validators": number_of_awake_validators,
         "validator_uptime": validator_uptime,
+        "validator_percentage_distribution": validator_percentage_distribution,
+        "validator_count_distribution": validator_count_distribution,
     }
 
@@ -124,3 +189,5 @@ def policy_average_effective_balance(
     average_effective_balance = total_active_balance / number_of_validators
 
     return {"average_effective_balance": average_effective_balance}
+
+
diff --git a/model/state_update_blocks.py b/model/state_update_blocks.py
index 48c9026..76f3333 100644
--- a/model/state_update_blocks.py
+++ b/model/state_update_blocks.py
@@ -49,14 +49,24 @@
             "number_of_validators_in_activation_queue": update_from_signal(
                 "number_of_validators_in_activation_queue"
             ),
+            "validators_in_activation_queue": update_from_signal(
+                "validators_in_activation_queue"
+            ),
             "number_of_active_validators": update_from_signal(
                 "number_of_active_validators"
             ),
             "number_of_awake_validators": update_from_signal("number_of_awake_validators"),
             "validator_uptime": update_from_signal("validator_uptime"),
+            "validator_percentage_distribution": update_from_signal(
+                "validator_percentage_distribution"
+            ),
+            "validator_count_distribution": update_from_signal(
+                "validator_count_distribution"
+            ),
         },
     }
 
+
 _state_update_blocks = [
     {
         "description": """
@@ -203,9 +213,6 @@
             "metric_validator_costs": metrics.policy_validator_costs,
         },
         "variables": {
-            "validator_count_distribution": update_from_signal(
-                "validator_count_distribution"
-            ),
             "validator_hardware_costs": update_from_signal("validator_hardware_costs"),
             "validator_cloud_costs": update_from_signal("validator_cloud_costs"),
             "validator_third_party_costs": update_from_signal(
@@ -235,6 +242,48 @@
             "total_profit_yields": update_from_signal("total_profit_yields"),
         },
     },
+    {
+        "description": """
+            Accounting of validator yield metrics associated with pooling returns
+            & initializing new shared validator instances
+        """,
+        "post_processing": False,
+        "policies": {
+            "pooling": metrics.policy_shared_validators,
+        },
+        "variables": {
+            "validator_pools_available_profits_eth": update_from_signal(
+                "validator_pools_available_profits_eth"
+            ),
+            "shared_validator_instances": update_from_signal(
+                "shared_validator_instances"
+            ),
+            "number_of_shared_validators": update_from_signal(
+                "number_of_shared_validators"
+            ),
+        },
+    },
+    {
+        "description": """
+            Accounting of validator pool yield metrics
+        """,
+        "post_processing": False,
+        "policies": {"staker_yields": metrics.policy_pool_yields},
+        "variables": {
+            "validator_pool_eth_staked": update_from_signal(
+                "validator_pool_eth_staked"
+            ),
+            "validator_pool_profit": update_from_signal("validator_pool_profit"),
+            "validator_pool_profit_yields": update_from_signal(
+                "validator_pool_profit_yields"
+            ),
+            "stakers_per_pool": update_from_signal("stakers_per_pool"),
+            "shared_validators_per_pool": update_from_signal(
+                "shared_validators_per_pool"
+            ),
+            "pool_size": update_from_signal("pool_size"),
+        },
+    },
 ]
 
 # Conditionally update the order of the State Update Blocks using a ternary operator
diff --git a/model/state_variables.py b/model/state_variables.py
index 5295fc6..6c0ddbb 100644
--- a/model/state_variables.py
+++ b/model/state_variables.py
@@ -13,6 +13,8 @@
 from datetime import datetime
 
 import model.constants as constants
+from model.utils import default
+from model.types import List
 import data.api.beaconchain as beaconchain
 import data.api.etherscan as etherscan
 import model.system_parameters as system_parameters
@@ -40,6 +42,23 @@
 eth_supply: ETH = etherscan.get_eth_supply(default=116_250_000e18) / constants.wei
 
+validator_count_distribution = np.array(
+    [
+        (validator.percentage_distribution * number_of_active_validators)
+        for validator in validator_environments
+    ],
+    dtype=int,
+)
+
+validator_percentage_distribution = np.array(
+    [validator.percentage_distribution for validator in validator_environments],
+    dtype=float,
+)
+
+# Set default value
+number_of_pools_per_validator_environment = 0 * validator_count_distribution
+
+
 @dataclass
 class StateVariables:
     """State Variables
@@ -159,10 +182,73 @@
     )
     """The total annualized profit (income received - costs) yields (percentage of investment amount) per validator environment"""
-    validator_count_distribution: np.ndarray = np.zeros(
-        (number_of_validator_environments, 1), dtype=int
+
+    validator_count_distribution: List[np.ndarray] = default(
+        validator_count_distribution
     )
     """The total number of validators per validator environment"""
+    validator_percentage_distribution: List[np.ndarray] = default(
+        validator_percentage_distribution
+    )
+    """
+    The percentage of validators in each environment, normalized to a total of 100%.
+
+    A vector with a value for each validator environment.
+ """ + # Variables for Pool Compounding mechanism + + number_of_pools: np.ndarray = np.zeros( + (number_of_validator_environments, 1), dtype=int + ) + """The number of pools per validator environment""" + + validator_pool_eth_staked: np.ndarray = np.zeros( + (number_of_validator_environments, 1), dtype=float + ) + """The average ETH staked in pools, per validator environment""" + + validator_pool_profit: np.ndarray = np.zeros( + (number_of_validator_environments, 1), dtype=float + ) + """The average profit (ETH) per pool, per validator environment""" + validator_pool_profit_yields: np.ndarray = np.zeros( + (number_of_validator_environments, 1), dtype=float + ) + """The average profit yields (%) per pool, per validator environment""" + + pool_size: np.ndarray = np.zeros((number_of_validator_environments), dtype=int) + """""" + + stakers_per_pool: np.ndarray = np.zeros( + (number_of_validator_environments, 1), dtype=int + ) + """""" + shared_validators_per_pool: np.ndarray = np.zeros( + (number_of_validator_environments, 1), dtype=int + ) + """""" + + validators_in_activation_queue: np.ndarray = np.zeros( + (number_of_validator_environments), dtype=int + ) + """The validator distribution counts for queued validators. + + Used by simulations implementing the pool compounding mechanism to allocate + new validators to their respective validator environments. + """ + shared_validator_instances: np.ndarray = np.zeros( + (number_of_validator_environments), dtype=int + ) + """New 'shared' validators initialised by pool environments leveraging compounding""" + number_of_shared_validators: np.ndarray = np.zeros( + (number_of_validator_environments), dtype=int + ) + """The total number of shared validators initialised by pool environments leveraging compounding""" + validator_pools_available_profits_eth: np.ndarray = np.zeros( + (number_of_validator_environments), dtype=ETH + ) + """The pooled profits available in validator environments for initializing new shared validator instances""" + validator_hardware_costs: np.ndarray = np.zeros( (number_of_validator_environments, 1), dtype=USD ) diff --git a/model/system_parameters.py b/model/system_parameters.py index ad45f60..b4e6052 100644 --- a/model/system_parameters.py +++ b/model/system_parameters.py @@ -57,36 +57,43 @@ type="diy_hardware", percentage_distribution=0.37, hardware_costs_per_epoch=0.0014, + avg_validators_per_individual=2, ), ValidatorEnvironment( type="diy_cloud", percentage_distribution=0.13, cloud_costs_per_epoch=0.00027, + avg_validators_per_individual=20, ), ValidatorEnvironment( type="pool_staas", percentage_distribution=0.27, third_party_costs_per_epoch=0.12, + avg_validators_per_individual=1, ), ValidatorEnvironment( type="pool_hardware", percentage_distribution=0.05, hardware_costs_per_epoch=0.0007, + avg_validators_per_individual=4, ), ValidatorEnvironment( type="pool_cloud", percentage_distribution=0.02, cloud_costs_per_epoch=0.00136, + avg_validators_per_individual=4, ), ValidatorEnvironment( type="staas_full", percentage_distribution=0.08, third_party_costs_per_epoch=0.15, + avg_validators_per_individual=2, ), ValidatorEnvironment( type="staas_self_custodied", percentage_distribution=0.08, third_party_costs_per_epoch=0.12, + avg_validators_per_individual=2, ), ] """Validator environment configuration @@ -134,6 +141,20 @@ ) ] +# Store the indeces for pool validator environments +pool_validator_indeces = [] +for i in range(len(validator_environments)): + if "pool" in validator_environments[i].type: + 
+        pool_validator_indeces.append(i)
+
+
+number_of_pools = [
+    np.array(
+        [0 for validator in validator_environments],
+        dtype=int,
+    )
+]
+
 
 @dataclass
 class Parameters:
@@ -403,7 +424,24 @@
     A vector with a value for each validator environment.
     """
+    avg_pool_size: List[int] = default([None])
+    """
+    The average, initial number of validators per pool.
+    avg_pool_size is initialized by experiments investigating compounding returns for validators in pool environments.
+    See model extension #5, in ROADMAP.md, for more information about this implementation.
+
+    By default, avg_pool_size is set to None.
+    """
+    number_of_pools: List[np.ndarray] = default(number_of_pools)
+    """
+    The initial number of pools per validator environment.
+    """
+
+    pool_validator_indeces: List[np.array] = default([pool_validator_indeces])
+    """
+    The indices corresponding to pool validator environments.
+    Used by the pool compounding mechanism.
+    """
+
     # Rewards, penalties, and slashing
     slashing_events_per_1000_epochs: List[int] = default([1])  # 1 / 1000 epochs
     """
diff --git a/model/types.py b/model/types.py
index c7c81eb..3dbe712 100644
--- a/model/types.py
+++ b/model/types.py
@@ -69,3 +69,4 @@ class ValidatorEnvironment:
     hardware_costs_per_epoch: USD_per_epoch = 0.0
     cloud_costs_per_epoch: USD_per_epoch = 0.0
     third_party_costs_per_epoch: Percentage_per_epoch = 0.0
+    avg_validators_per_individual: int = 1
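
---

For intuition, the compounding rule introduced in `policy_shared_validators` reduces to a few lines of arithmetic: pooled profits accumulate each timestep, and once a pool's balance covers the 32 ETH stake requirement (`constants.eth_deposited_per_validator` in the model), the pool initializes new shared validators and the deposited stake leaves the balance. The sketch below is illustrative only, not part of this diff: the helper name `compound_pool_returns` and the example numbers are assumptions.

```python
import numpy as np

STAKE_REQUIREMENT_ETH = 32  # mirrors constants.eth_deposited_per_validator


def compound_pool_returns(available_profits_eth, profits_this_step_eth, number_of_pools):
    """One timestep of pool compounding (minimal sketch).

    Profits accrue into a pooled balance; each pool initializes
    floor(balance_per_pool / 32 ETH) new shared validators, and the
    deposited stake is subtracted from the balance.
    """
    # Aggregate this timestep's profits into the pooled balance
    available_profits_eth = available_profits_eth + profits_this_step_eth
    # Average balance held by a single pool
    avg_pool_profit = available_profits_eth / number_of_pools
    # New shared validators each pool can afford this timestep
    new_shared_per_pool = np.floor(avg_pool_profit / STAKE_REQUIREMENT_ETH)
    new_shared_validators = number_of_pools * new_shared_per_pool
    # The deposited stake leaves the pooled profit balance
    available_profits_eth -= new_shared_validators * STAKE_REQUIREMENT_ETH
    return new_shared_validators, available_profits_eth


# Example: 100 pools that collectively earn 1,000 ETH per timestep
balance = 0.0
for t in range(5):
    new, balance = compound_pool_returns(balance, 1_000.0, 100)
    print(f"timestep {t}: {int(new)} new shared validators, {balance:.0f} ETH carried over")
```

Run as-is, no shared validators appear until the balance per pool first crosses 32 ETH (timestep 3 here), at which point all pools spawn a validator at once; this is the sudden 'jump' behaviour described in the assumptions of experiment notebook 4.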