From a33f3cc1e615bc250fd98bf29f94a4b7296b75c4 Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Sun, 8 Sep 2024 18:52:32 -0700 Subject: [PATCH 01/12] remove doc line regarding zero quantities --- src/icepool/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/icepool/__init__.py b/src/icepool/__init__.py index e1a081fa..2fd2ea93 100644 --- a/src/icepool/__init__.py +++ b/src/icepool/__init__.py @@ -13,8 +13,6 @@ * Instances are immutable (apart from internal caching). Anything that looks like it mutates an instance actually returns a separate instance with the change. -* Unless explictly specified otherwise, elements with zero quantity, rolls, etc. - are considered. """ __docformat__ = 'google' From bb757b71652b3ff4e8608c879aa9d545fc917f8f Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Sat, 14 Sep 2024 18:30:33 -0700 Subject: [PATCH 02/12] add `test_map_and_time_extra_args` --- tests/map_test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/map_test.py b/tests/map_test.py index 062fb969..7590f7fd 100644 --- a/tests/map_test.py +++ b/tests/map_test.py @@ -176,3 +176,10 @@ def test_fractional_coin(): def test_stochastic_round(): assert ((6 @ d6) / 2).stochastic_round().mean() == 10.5 assert ((6 @ d6) / Fraction(3)).stochastic_round().mean() == 7 + + +def test_map_and_time_extra_args(): + def test_function(current, roll): + return min(current + roll, 10) + result = Die([0]).map_and_time(test_function, d6, repeat=10) + assert result.marginals[1].mean() == d6.mean_time_to_sum(10) From fd5a00c9efdb409793c99dd0191d88f43cfbb19b Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Sun, 15 Sep 2024 00:17:07 -0700 Subject: [PATCH 03/12] add dates to CHANGELOG.md --- CHANGELOG.md | 116 +++++++++++++++++++++++++-------------------------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f435128..591b7f97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ * Simplify determination of outcome order for multiset evaluations. * Simplify implementation of generator unbinding. -## v1.5.0 +## v1.5.0 - 23 August 2024 * Providing only a `drop` argument to `lowest()` or `highest()` will now keep all other elements rather than just the first non-dropped element. * `depth` argument to `Die.reroll()` is now mandatory. @@ -38,7 +38,7 @@ * Experimental `all_straights_reduce_counts` and `argsort` multiset evaluations. * Breaking change: `nearest`, `quantity`, `quantities`, `probability`, `probabilities`, `keep_counts` no longer have separate variants for each comparison; instead, they now take a comparison argument. `quantities` and `probabilities` now accept a comparison argument but no longer accept a list of outcomes. -## v1.4.0 +## v1.4.0 - 1 February 2024 * Rename `keep_counts` to `keep_counts_ge`. Add `le`, `lt`, `gt`, `eq`, and `ne` variants. * Add `count_subset` evaluation that counts how many times the right side is contained in the left. @@ -49,14 +49,14 @@ * Fix `Symbols` intersection. * Fix argument order in `__rfloordiv__`. -## v1.3.0 +## v1.3.0 - 30 December 2023 * Fix `Symbols` operator priority with `Population`, `AgainExpression`. * Added experimental `map_to_pool` and `explode_to_pool` methods. * Split `compair` into `compare_lt` etc. * Constructing a mixture of dice now effectively uses the old `lcm_joint` method, which reduces the denominator more aggressively. -## v1.2.0 +## v1.2.0 - 23 December 2023 * Experimental `Symbols` class representing a multiset of characters. 
* `marginals` now forwards `__getattr__` to outcomes, as long as the attribute name doesn't begin with an underscore. @@ -68,19 +68,19 @@ * `Mapping`s are now properly excluded from `Population.common_outcome_length`. * Fixed quoting in `repr` for populations. -## v1.1.2 +## v1.1.2 - 10 December 2023 * Add `z(n)`, which produces a die that runs from 0 to `n - 1` inclusive. * Add `Population.to_one_hot()`, which converts the die or deck to a one-hot representation. * Add `Die.mean_time_to_sum()`, which computes the mean number of rolls until the cumulative sum is greater or equal to the target. -## v1.1.1 +## v1.1.1 - 16 November 2023 * Fix non-fully-bound case of `MultisetEvaluator.evaluate()`. * Add `default` argument to `lowest(), highest(), middle()`. * Add `Population.entropy()`. -## v1.1.0 +## v1.1.0 - 15 October 2023 * `mean()`, `variance()`, etc. now return an exact `fractions.Fraction` when possible. (Note that `Fraction`s only support float-style formatting from Python 3.12.) * Rename `disjoint_union` to `additive_union`. @@ -88,21 +88,21 @@ * Symmetric difference (`^`) for multiset expressions is now a straight absolute difference of counts. * Add unary `+` operator for multiset expressions, which is the same as `keep_counts(0)`. -## v1.0.0 +## v1.0.0 - 22 July 2023 Improve some error messages. -## v0.29.3 +## v0.29.3 - 13 July 2023 Fix a bug in `MultisetExpression.keep_outcomes()` and `drop_outcomes()` regarding unbinding variables. -## v0.29.2 +## v0.29.2 - 12 July 2023 * `MultisetExpression.map_counts()` now accepts multiple arguments. * `MultisetExpression.keep_outcomes()` and `drop_outcomes()` now accept an expression as an argument. * `MultisetExpression.highest_outcome_and_count()` now returns the min outcome if no outcomes have positive count. -## v0.29.1 +## v0.29.1 - 2 July 2023 * `highest`, `lowest`, and `middle` can now take a single iterable argument. * Add `all_straights` evaluation. @@ -112,18 +112,18 @@ Fix a bug in `MultisetExpression.keep_outcomes()` and `drop_outcomes()` regardin * `expand` evaluator now allows order to be set. * Experimental `compair` evaluation. -## v0.29.0 +## v0.29.0 - 30 April 2023 * Add HTML and BBCode options for population formatting. * Renamed `apply` to `map` and the decorator version to `map_function`. * The above now uses `guess_star`. * Add default of 1 die for `Die.pool()`. -## v0.28.1 +## v0.28.1 - 23 April 2023 Fix mathematical bug in `Die.reroll` for limited depth. -## v0.28.0 +## v0.28.0 - 16 April 2023 Retired implicit elementwise operations on tuples. This is now handled by a new explicit `Vector` container. `cartesian_product()` is replaced by two new functions: `tupleize()` and `vectorize()`. @@ -139,27 +139,27 @@ Retired the linear algorithm for comparators for `Die`. While the quadratic algo `die.tuple_len()` renamed to `common_outcome_length`. Now applies to all sized outcome types. -## v0.27.1 +## v0.27.1 - 9 April 2023 * Counts type `Qs` is now invariant with more detailed typing in `Deal`. -## v0.27.0 +## v0.27.0 - 29 March 2023 Incremented two versions because I messed up the last version number. * `commonize_denominator` visible at top level. -## v0.25.6 +## v0.25.6 - 28 March 2023 * Rename `from_cumulative_quantities` to `from_cumulative` and allow die inputs. * Mark `multiset_function` experimental again and note more caveats in the docstring. -## v0.25.5 +## v0.25.5 - 11 March 2023 * Add missing variants of `nearest` and `quantities` methods. 
* Add optional `outcomes` argument to `quantities` and `probabilities` methods. -## v0.25.4 +## v0.25.4 - 17 February 2023 * `map` and similar functions will attempt to guess `star`. * Changed `positive_only` parameter to `expression.all_counts` to `filter`. @@ -169,7 +169,7 @@ Incremented two versions because I messed up the last version number. * Add `map_counts` expression. * `Reroll` in tuple outcomes and joint evaluations causes the whole thing to be rerolled. -## v0.25.3 +## v0.25.3 - 28 January 2023 * Tuple outcomes can now be compared with single outcomes. * Add `.keep, .highest, lowest, .middle` variants of `apply_sorted`. @@ -177,15 +177,15 @@ Incremented two versions because I messed up the last version number. * Add `keep_outcomes, drop_outcomes` methods to expressions. * Add `any` evaluation to expresions. -## v0.25.2 +## v0.25.2 - 23 January 2023 Comparisons on dice with tuple outcomes are now performed elementwise. -## v0.25.1 +## v0.25.1 - 23 January 2023 Testing GitHub workflows. -## 0.25.0 +## 0.25.0 - 22 January 2023 Expanded multiset processing with multiset expressions. @@ -202,7 +202,7 @@ Expanded multiset processing with multiset expressions. * Removed suits. * Stop using `__class_getitem__`, which is intended for typing only. -## 0.24.0 +## 0.24.0 - 8 January 2023 Reworked built-in generators and evaluators. @@ -214,7 +214,7 @@ Reworked built-in generators and evaluators. * Generators and evaluators are now paramterized by count type as well. * Move concrete evaluators to a submodule. -## 0.23.3 +## 0.23.3 - 31 December 2022 * Fixed weighting bug in `__matmul__` when the left die has an outcome of 0. * Retired the names `standard` and `bernoulli`. These will be just `d` and `coin` respectively. @@ -222,17 +222,17 @@ Reworked built-in generators and evaluators. * Reinstate automatic Cartesian product in `Population` construction. * `if_else` now runs in two stages. -## 0.23.2 +## 0.23.2 - 30 December 2022 * Incremental sorting for `all_matching_sets` to reduce state space. * `lowest()` and `highest()` now actually visible. * Improved checking for tuple outcome sortability and types. -## 0.23.1 +## 0.23.1 - 29 December 2022 Prepend `sum_` to OutcomeCountGenerator versions of `highest` and `lowest`. -## 0.23.0 +## 0.23.0 - 29 December 2022 Expanded typing, particularly in terms of parameterizing types. @@ -246,7 +246,7 @@ Expanded typing, particularly in terms of parameterizing types. * Added `cartesian_product()`, took this functionality out of the `Die` constructor for now. * Added `OutcomeCountGenerator.all_matching_sets()`. -## 0.22.0 +## 0.22.0 - 19 December 2022 * `Die.sub()` renamed to `Die.map()`. * `Die.map()` can now include the number of steps taken until absorption. @@ -255,7 +255,7 @@ Expanded typing, particularly in terms of parameterizing types. * Only tuples get separate columns in tables and not `str` or `bytes`. * Non-recursive algorithm for `Again()` handling. -## 0.21.0 +## 0.21.0 - 3 December 2022 * Nested lists are now allowed in the `Die()` constructor. * Single outcomes can be sent to the `Die()` constructor without wrapping them in a list. @@ -270,12 +270,12 @@ Expanded typing, particularly in terms of parameterizing types. * Add comparators to `Again`. * Experimental absorbing Markov chain analysis for `Die.sub(depth=None)`. -## 0.20.1 +## 0.20.1 - 10 September 2022 * Added `one_hot` function. * Added experimental suit generator that wraps a generator and produces counts for all suits for each value. 
-## 0.20.0 +## 0.20.0 - 6 September 2022 * Retired `denominator_method`. * Renamed `max_depth` parameters to just `depth`. @@ -284,11 +284,11 @@ Expanded typing, particularly in terms of parameterizing types. * `marginals` is now a Sequence, and can be iterated over, unpacked, etc. * `Die.sub()` now expands extra die arguments into their outcomes. -## 0.19.1 +## 0.19.1 - 21 August 2022 Fix `contains_again` checking of sequences. -## 0.19.0 +## 0.19.0 - 21 August 2022 New feature: `Again()`, a placeholder that allows to roll again with some modification. @@ -296,29 +296,29 @@ New feature: `Again()`, a placeholder that allows to roll again with some modifi * Add optional `final_kwargs` method to evaluators. * Rename `max_depth` parameter of `sub()` to `repeat`. -## 0.18.0 +## 0.18.0 - 19 August 2022 * Rename `Die.reduce()` to `Die.simplify()` to avoid confusion with the free function `reduce()`. * Rename `OutcomeCountEvaluator.direction()` to `order()` and add explicitly named `Order` enums. * Add `is_in`, `count`, and `count_in` methods to dice. * Add built-in evaluators as convenience functions of `OutcomeCountGenerator`. -## 0.17.4 +## 0.17.4 - 1 August 2022 * Fixes to `max_depth=None` case of `sub()`. * This is now marked experimental. -## 0.17.3 +## 0.17.3 - 1 August 2022 * Pass `star` parameter to `sub()` to recursive calls. -## 0.17.2 +## 0.17.2 - 1 August 2022 * `sub()` with `max_depth=None` now handles "monotonic" transitions with finite states. Full absorbing Markov chain calculation still under consideration. * `sub()` no longer accept sequence input. * Some minor formatting fixes. -## 0.17.1 +## 0.17.1 - 25 June 2022 * Standardize outcome count of `bernoulli`/`coin` and comparators. * `standard_pool` now accepts a `dict` argument. @@ -327,7 +327,7 @@ New feature: `Again()`, a placeholder that allows to roll again with some modifi * Pools are no longer resizable after creation. * `Die.pool()` now has mandatory argument, now accepts a sequence argument to set `sorted_roll_counts`. -## 0.17.0 +## 0.17.0 - 19 June 2022 More renaming, experimental `sample()` methods. @@ -340,7 +340,7 @@ More renaming, experimental `sample()` methods. * Allow formatting 0-1 probability. * Experimental `sample()` methods for `OutcomeCountGenerator` and `OutcomeCountEvaluator`. -## 0.16.1 +## 0.16.1 - 18 June 2022 Development of deck API. @@ -350,7 +350,7 @@ Development of deck API. * `Pool`s are not permitted to be constructed using a raw `Die` or `Deck` argument. * `reduce` argument of `Die.equals()` renamed to `reduce_weights`. -## 0.16.0 +## 0.16.0 - 17 June 2022 Significant API changes, experimental deck support. @@ -369,7 +369,7 @@ Significant API changes, experimental deck support. * Add `clear_pool_cache` function. * Forward `*extra_args` for `reroll, reroll_until, explode`. -## 0.15.0 +## 0.15.0 - 10 June 2022 Added type hints. Now requires Python 3.10 or later. @@ -379,11 +379,11 @@ Other changes: * Add `Die.set_range()`. * `standard()` / `d()` argument is now positional-only. -## 0.14.1 +## 0.14.1 - 30 May 2022 Reinstate alternate internal `EvalPool` algorithm, which provides better performance in some cases. -## 0.14.0 +## 0.14.0 - 30 May 2022 * Added a new `EvalPool.alignment()` method. This allows to specify an iterable of outcomes that should always be seen by `next_state` even if they have zero count. * The free function `d()` is now simply an alias for `standard()`. 
@@ -391,17 +391,17 @@ Reinstate alternate internal `EvalPool` algorithm, which provides better perform * The `@` operator now casts the right side to a `Die` like other operators. * Some internal changes to `EvalPool` algorithm. -## 0.13.2 +## 0.13.2 - 23 May 2022 The data of a die resulting from `==` or `!=` is lazily evaluated. This saves computation in case the caller is only interested in the truth value. -## 0.13.1 +## 0.13.1 - 23 May 2022 `EvalPool` favors the cached direction more. -## 0.13.0 +## 0.13.0 - 23 May 2022 Major reworking of pool construction. @@ -410,12 +410,12 @@ Major reworking of pool construction. * Pools can be of arbitrary dice, though non-truncative sets of dice will have lower performance. There is some performance penalty overall. * `apply()` called with no arguments now calls `func` once with no arguments. -## 0.12.1 +## 0.12.1 - 21 May 2022 * Removed `Die.keep()`. Use `Die.pool(...).sum()`. * `highest`/`lowest` returns empty die if any of the input dice are empty. -## 0.12.0 +## 0.12.0 - 18 May 2022 * Free-function form of `lowest`, `highest` now selects between algorithms for better performance and generality. * Removed `die.lowest, `die.highest`. @@ -426,7 +426,7 @@ Major reworking of pool construction. * `die.zero()` no longer reduces weights to 1. * Update PyPi classifiers. -## 0.11.0 +## 0.11.0 - 11 May 2022 * Removed `min_outcome` parameter from die construction. * Operations on tuple outcomes are now performed recursively, to match the fact that die expansion on construction is recursive. @@ -437,18 +437,18 @@ Major reworking of pool construction. * Added `star` parameter to `sub`, `explode`, `reroll`, `reroll_until` methods. If set, this unpacks outcomes before giving them to the supplied function. * Added experimental `JointEval` class for performing two evals on the same roll of a pool. -## 0.10.2 +## 0.10.2 - 8 May 2022 * Operators other than `[]` are performed element-wise on tuples. * Rename `DicePool` to just `Pool`. Merge the old factory function into the constructor. -## 0.10.1 +## 0.10.1 - 8 May 2022 * Fix denominator_method='reduce' in die creation. * Fix outcomes consisting of empty tuple `()`. * `apply()` with no dice produces an empty die. -## 0.10.0 +## 0.10.0 - 7 May 2022 Retired the `EmptyDie` / `ScalarDie` / `VectorDie` distinction. @@ -457,20 +457,20 @@ Retired the `EmptyDie` / `ScalarDie` / `VectorDie` distinction. * The `[]` operator now forwards to the outcome, acting similar to what `VectorDie.dim[]` used to do. * Removed `PoolEval.bind_dice()`. It was cute, but I'm not convinced it was worth spending API on. -## 0.9.1 +## 0.9.1 - 1 May 2022 * This will probably be the last version with a `VectorDie` distinction. * Dice cannot have negative weights. * `VectorDie` cannot be nested inside tuple outcomes. -## 0.9.0 +## 0.9.0 - 28 April 2022 * Die and dict arguments to `Die()` are now expanded, including when nested. * Add `Die.if_else()` method, which acts as a ternary conditional operator on outcomes. * Dice are now hashable. `==` and `!=` return dice with truth values based on whether the two dice have identical outcomes and weights. * `ndim` now uses singletons `icepool.Scalar` and `icepool.Empty`. -## 0.8.0 +## 0.8.0 - 23 April 2022 * `EvalPool.eval()` can now be provided with single rolls of a pool. This can be a dict-like mapping individual die outcomes to counts or a sequence of individual die outcomes. @@ -484,7 +484,7 @@ Retired the `EmptyDie` / `ScalarDie` / `VectorDie` distinction. 
* `DicePool` is no longer iterable, since there isn't an intuitive, unambiguous way of doing so. * `align_range()` now only operates on scalar outcomes. -## 0.7.0 +## 0.7.0 - 16 April 2022 * Renamed from `hdroller` to `icepool`. * Primary repository is now https://github.com/HighDiceRoller/icepool. From 07d4680e91ae304d2152fe506e3eed529c88e3cd Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Sun, 15 Sep 2024 00:25:55 -0700 Subject: [PATCH 04/12] clarify that `final_outcome` is output and `extra_outcomes` is input --- src/icepool/evaluator/multiset_evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/icepool/evaluator/multiset_evaluator.py b/src/icepool/evaluator/multiset_evaluator.py index 405566bd..7821e05c 100644 --- a/src/icepool/evaluator/multiset_evaluator.py +++ b/src/icepool/evaluator/multiset_evaluator.py @@ -100,7 +100,7 @@ def next_state(self, state: Hashable, outcome: T_contra, /, *counts: def final_outcome( self, final_state: Hashable ) -> 'U_co | icepool.Die[U_co] | icepool.RerollType': - """Optional function to generate a final outcome from a final state. + """Optional function to generate a final output outcome from a final state. By default, the final outcome is equal to the final state. Note that `None` is not a valid outcome for a `Die`, @@ -137,7 +137,7 @@ def order(self) -> Order: def extra_outcomes(self, outcomes: Sequence[T_contra]) -> Collection[T_contra]: - """Optional method to specify extra outcomes that should be seen by `next_state()`. + """Optional method to specify extra outcomes that should be seen as inputs to `next_state()`. These will be seen by `next_state` even if they do not appear in the generator(s). The default implementation returns `()`, or no additional From bc7550f544eb0c9d7d2dd4afe18d86807ce503e6 Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Fri, 20 Sep 2024 18:20:32 -0700 Subject: [PATCH 05/12] v1.6.0 * Breaking change: outcomes with zero quantities are removed when constructing `Die` and `Deck`. * Functions and methods relating to zero-quantities are removed: `align()`, `align_range()`, `Population.has_zero_quantities()`, `Die.trim()`, `Die.set_range()`, `Die.set_outcomes()`. * You can use `consecutive()` or `sorted_union()` to get an appropriate superset of sets of outcomes. * Breaking change: `MultisetEvaluator.alignment()` is renamed to `MultisetEvaluator.extra_outcomes()`. * `MultisetEvaluator.range_alignment()` is renamed to `MultisetEvaluator.consecutive()`. * The `Alignment` class is no longer public. * Breaking change: `Deck.multiply_counts()` and `Population.scale_quantities()` are replaced/renamed to `Population.multiply_quantities()` etc. * Add `Deck.sequence()` and `Die.sequence()` method. * Add `Population.pad_to_denominator()` method. * Move `zero()` and `zero_outcome()` from `Die` to `Population`. * `@` operator now sums left-to-right. * Remove old `compair` evaluation. * `min_outcome()` and `max_outcome()` free functions can now be called using a single iterable argument. * Forward algorithm now has a persistent cache. * Add skip optimization for single deals with keep tuples. * Pools now only skip dice, not outcomes. This is a bit slower in some cases but provides more consistent iteration order. * Add shared evaluator instances for some built-in evaluator for caching. * Simplify determination of outcome order for multiset evaluations. * Simplify implementation of generator unbinding. * Fix `extra_args` expansion for `map_and_time`. 
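
As a minimal sketch of the `alignment` → `extra_outcomes` (and `range_alignment` → `consecutive`) rename, adapted from the tutorial notebook updated later in this series — the evaluator and pool are illustrative only, not a migration guide:

```python
import icepool

class LargestStraightEvaluator(icepool.MultisetEvaluator):
    def next_state(self, state, outcome, count):
        best_run, run = state or (0, 0)
        if count >= 1:
            run += 1
        else:
            run = 0
        return max(best_run, run), run

    def final_outcome(self, final_state):
        # Return just the length of the best run.
        return final_state[0]

    # Previously: alignment = icepool.MultisetEvaluator.range_alignment
    # Guarantees next_state sees every consecutive integer, even those with zero count.
    extra_outcomes = icepool.MultisetEvaluator.consecutive

print(LargestStraightEvaluator()(icepool.d6.pool(5)))
```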
--- CHANGELOG.md | 3 ++- src/icepool/__init__.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 591b7f97..95be19cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.6.0 +## v1.6.0 - 20 September 2024 * Breaking change: outcomes with zero quantities are removed when constructing `Die` and `Deck`. * Functions and methods relating to zero-quantities are removed: `align()`, `align_range()`, `Population.has_zero_quantities()`, `Die.trim()`, `Die.set_range()`, `Die.set_outcomes()`. @@ -19,6 +19,7 @@ * Add shared evaluator instances for some built-in evaluator for caching. * Simplify determination of outcome order for multiset evaluations. * Simplify implementation of generator unbinding. +* Fix `extra_args` expansion for `map_and_time`. ## v1.5.0 - 23 August 2024 diff --git a/src/icepool/__init__.py b/src/icepool/__init__.py index 936dbe0e..d772bb3c 100644 --- a/src/icepool/__init__.py +++ b/src/icepool/__init__.py @@ -17,7 +17,7 @@ __docformat__ = 'google' -__version__ = '1.6.0a0' +__version__ = '1.6.0' from typing import Final From baf2484f913e08a0e82755a7858eff2c8aecd77d Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Fri, 20 Sep 2024 18:42:07 -0700 Subject: [PATCH 06/12] notebooks to 1.6.0 --- jupyter_lite_config.json | 2 +- notebooks/cthulhutech.ipynb | 2 +- notebooks/decks/limited_wildcard.ipynb | 2 +- notebooks/tutorial/c08_evaluators.ipynb | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/jupyter_lite_config.json b/jupyter_lite_config.json index 9d70bfd7..0f31d7be 100644 --- a/jupyter_lite_config.json +++ b/jupyter_lite_config.json @@ -10,7 +10,7 @@ }, "PipliteAddon": { "piplite_urls" : [ - "https://files.pythonhosted.org/packages/py3/i/icepool/icepool-1.5.0-py3-none-any.whl", + "https://files.pythonhosted.org/packages/py3/i/icepool/icepool-1.6.0-py3-none-any.whl", "https://files.pythonhosted.org/packages/py2.py3/i/ipywidgets/ipywidgets-7.7.1-py2.py3-none-any.whl", "https://files.pythonhosted.org/packages/py3/j/jupyterlab-widgets/jupyterlab_widgets-1.1.1-py3-none-any.whl" ] diff --git a/notebooks/cthulhutech.ipynb b/notebooks/cthulhutech.ipynb index c4e3edcf..02c86e60 100644 --- a/notebooks/cthulhutech.ipynb +++ b/notebooks/cthulhutech.ipynb @@ -28,7 +28,7 @@ }, { "cell_type": "code", - "source": "%pip install icepool\n\nimport icepool\n\nclass CthulhuTechEval(icepool.MultisetEvaluator):\n def next_state(self, state, outcome, count):\n score, run = state or (0, 0)\n if count > 0:\n set_score = outcome * count\n run_score = 0\n run += 1\n if run >= 3:\n # This could be the triangular formula, but it's clearer this way.\n for i in range(run): run_score += (outcome - i)\n score = max(set_score, run_score, score)\n else:\n # No dice rolled this number, so the score remains the same.\n run = 0\n return score, run\n\n def final_outcome(self, final_state):\n # Return just the score.\n return final_state[0]\n \n # Outcomes should be seen in consecutive order.\n alignment = icepool.MultisetEvaluator.range_alignment\n\nimport matplotlib.pyplot as plt\n\ndefault_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\nevaluator = CthulhuTechEval()\nfigsize = (16, 9)\nfig, ax = plt.subplots(figsize=figsize)\n\nfor num_dice in range(1, 11):\n pool = icepool.d10.pool(num_dice)\n result = evaluator.evaluate(pool)\n result, _, _ = icepool.align_range(result, 0, 100)\n ax.plot(result.outcomes(), result.probabilities('>=', percent=True))\n marker_size = 64 if num_dice < 10 else 128\n 
ax.scatter(result.median(), 50.0,\n marker=('$%d$' % num_dice),\n facecolor=default_colors[num_dice-1],\n s=marker_size)\n\nax.set_xticks(range(0, 61, 5))\nax.set_yticks(range(0, 101, 10))\nax.set_xlim(0, 60)\nax.set_ylim(0, 100)\nax.set_xlabel('Result')\nax.set_ylabel('Chance of getting at least (%)')\nax.grid()\nplt.show()", + "source": "%pip install icepool\n\nimport icepool\n\nclass CthulhuTechEval(icepool.MultisetEvaluator):\n def next_state(self, state, outcome, count):\n score, run = state or (0, 0)\n if count > 0:\n set_score = outcome * count\n run_score = 0\n run += 1\n if run >= 3:\n # This could be the triangular formula, but it's clearer this way.\n for i in range(run): run_score += (outcome - i)\n score = max(set_score, run_score, score)\n else:\n # No dice rolled this number, so the score remains the same.\n run = 0\n return score, run\n\n def final_outcome(self, final_state):\n # Return just the score.\n return final_state[0]\n \n # Outcomes should be seen in consecutive order.\n extra_outcomes = icepool.MultisetEvaluator.consecutive\n\nimport matplotlib.pyplot as plt\n\ndefault_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\nevaluator = CthulhuTechEval()\nfigsize = (16, 9)\nfig, ax = plt.subplots(figsize=figsize)\n\nfor num_dice in range(1, 11):\n pool = icepool.d10.pool(num_dice)\n result = evaluator.evaluate(pool)\n ax.plot([x for x in range(101)], [result.probability('>=', percent=True) for x in range(101)])\n marker_size = 64 if num_dice < 10 else 128\n ax.scatter(result.median(), 50.0,\n marker=('$%d$' % num_dice),\n facecolor=default_colors[num_dice-1],\n s=marker_size)\n\nax.set_xticks(range(0, 61, 5))\nax.set_yticks(range(0, 101, 10))\nax.set_xlim(0, 60)\nax.set_ylim(0, 100)\nax.set_xlabel('Result')\nax.set_ylabel('Chance of getting at least (%)')\nax.grid()\nplt.show()", "metadata": { "trusted": true }, diff --git a/notebooks/decks/limited_wildcard.ipynb b/notebooks/decks/limited_wildcard.ipynb index 003e416e..be5f8cc9 100644 --- a/notebooks/decks/limited_wildcard.ipynb +++ b/notebooks/decks/limited_wildcard.ipynb @@ -28,7 +28,7 @@ }, { "cell_type": "code", - "source": "%pip install icepool\n\nimport icepool\nimport time\n\nclass EvalWildcard(icepool.MultisetEvaluator):\n def next_state(self, state, outcome, target, *counts):\n # state = the number of wildcards needed.\n if state is None:\n state = 0\n total_count = sum(counts)\n # Final: wildcards.\n if outcome == 'W':\n if state == 'fail':\n return False\n else:\n return total_count >= state\n if state == 'fail':\n return state\n # Could potentially use wildcards.\n if outcome >= 'C':\n return state + max(target - total_count, 0)\n # Ineligible for wildcards.\n if total_count < target:\n return 'fail'\n return state\n\n def order(self):\n # See outcomes in ascending order, i.e A -> W.\n return 1\n\n def alignment(self, *_):\n # Always process wildcard.\n return ('W',)\n\n# When expressed as a sequence, each appearance counts as one card.\ntarget = list('ABBCDDEEE')\n# When expressed as a dict, the value gives the number of cards.\ndeal = icepool.Deck({'A': 3, 'B': 8, 'C': 2, 'D': 3, 'E': 3, 'F': 113, 'W': 7}).deal(35)\nhand = list('W')\n\nevaluator = EvalWildcard()\n\nstart_ns = time.perf_counter_ns()\n\n# The counts resulting from the three arguments are supplied as\n# the last three arguments to next_state.\nresult = evaluator.evaluate(target, deal, hand)\n\nend_ns = time.perf_counter_ns()\nelapsed_ms = (end_ns - start_ns) * 1e-6\nprint(f'Computation time: {elapsed_ms:0.1f} 
ms')\n\nprint(f'{result:md:o|q==|%==}')", + "source": "%pip install icepool\n\nimport icepool\nimport time\n\nclass EvalWildcard(icepool.MultisetEvaluator):\n def next_state(self, state, outcome, target, *counts):\n # state = the number of wildcards needed.\n if state is None:\n state = 0\n total_count = sum(counts)\n # Final: wildcards.\n if outcome == 'W':\n if state == 'fail':\n return False\n else:\n return total_count >= state\n if state == 'fail':\n return state\n # Could potentially use wildcards.\n if outcome >= 'C':\n return state + max(target - total_count, 0)\n # Ineligible for wildcards.\n if total_count < target:\n return 'fail'\n return state\n\n def order(self):\n # See outcomes in ascending order, i.e A -> W.\n return 1\n\n def extra_outcomes(self, outcomes):\n # Always process wildcard.\n return ('W',)\n\n# When expressed as a sequence, each appearance counts as one card.\ntarget = list('ABBCDDEEE')\n# When expressed as a dict, the value gives the number of cards.\ndeal = icepool.Deck({'A': 3, 'B': 8, 'C': 2, 'D': 3, 'E': 3, 'F': 113, 'W': 7}).deal(35)\nhand = list('W')\n\nevaluator = EvalWildcard()\n\nstart_ns = time.perf_counter_ns()\n\n# The counts resulting from the three arguments are supplied as\n# the last three arguments to next_state.\nresult = evaluator.evaluate(target, deal, hand)\n\nend_ns = time.perf_counter_ns()\nelapsed_ms = (end_ns - start_ns) * 1e-6\nprint(f'Computation time: {elapsed_ms:0.1f} ms')\n\nprint(f'{result:md:o|q==|%==}')", "metadata": { "trusted": true }, diff --git a/notebooks/tutorial/c08_evaluators.ipynb b/notebooks/tutorial/c08_evaluators.ipynb index c704b4cc..4a23deaf 100644 --- a/notebooks/tutorial/c08_evaluators.ipynb +++ b/notebooks/tutorial/c08_evaluators.ipynb @@ -83,12 +83,12 @@ }, { "cell_type": "markdown", - "source": "### `alignment`\n\n`alignment` allows you to guarantee that certain outcomes are not skipped even if they have zero count. For example, in the case of straights, this allows us to skip checking the previous outcome.", + "source": "### `extra_outcomes`\n\n`extra_outcomes` allows you to guarantee that certain outcomes are not skipped even if they have zero count. 
For example, in the case of straights, this allows us to skip checking the previous outcome.", "metadata": {} }, { "cell_type": "code", - "source": "class LargestStraightEvaluator(MultisetEvaluator):\n\n def next_state(self, state, outcome, count):\n best_run, run = state or (0, 0)\n if count >= 1:\n run += 1\n else:\n run = 0\n return max(best_run, run), run\n\n def final_outcome(self, final_state):\n # Return just the length of the best run.\n return final_state[0]\n \n # This guarantees that we see all consecutive integers.\n alignment = MultisetEvaluator.range_alignment\n \nlargest_straight_evaluator = LargestStraightEvaluator()\nprint(largest_straight_evaluator(d6.pool(5)))", + "source": "class LargestStraightEvaluator(MultisetEvaluator):\n\n def next_state(self, state, outcome, count):\n best_run, run = state or (0, 0)\n if count >= 1:\n run += 1\n else:\n run = 0\n return max(best_run, run), run\n\n def final_outcome(self, final_state):\n # Return just the length of the best run.\n return final_state[0]\n \n # This guarantees that we see all consecutive integers.\n extra_outcomes = MultisetEvaluator.consecutive\n \nlargest_straight_evaluator = LargestStraightEvaluator()\nprint(largest_straight_evaluator(d6.pool(5)))", "metadata": { "trusted": true }, @@ -108,7 +108,7 @@ }, { "cell_type": "code", - "source": "class BadStraightEvaluator(MultisetEvaluator):\n\n def next_state(self, state, outcome, count):\n best_run, run = state or (0, 0)\n if count >= 1:\n run += 1\n else:\n run = 0\n return max(best_run, run), run\n\n def final_outcome(self, final_state):\n # Return just the length of the best run.\n return final_state[0]\n \n # Missing alignment.\n\nbad_evaluator = BadStraightEvaluator()\nprint(bad_evaluator([1, 2, 2, 5, 6]))\nprint(largest_straight_evaluator([1, 2, 2, 5, 6]))", + "source": "class BadStraightEvaluator(MultisetEvaluator):\n\n def next_state(self, state, outcome, count):\n best_run, run = state or (0, 0)\n if count >= 1:\n run += 1\n else:\n run = 0\n return max(best_run, run), run\n\n def final_outcome(self, final_state):\n # Return just the length of the best run.\n return final_state[0]\n \n # Missing extra_outcomes.\n\nbad_evaluator = BadStraightEvaluator()\nprint(bad_evaluator([1, 2, 2, 5, 6]))\nprint(largest_straight_evaluator([1, 2, 2, 5, 6]))", "metadata": { "trusted": true }, @@ -128,7 +128,7 @@ }, { "cell_type": "code", - "source": "from icepool import Order\n\nclass LargestStraightAndOutcomeEvaluator(MultisetEvaluator):\n\n def next_state(self, state, outcome, count):\n best_run, run = state or ((0, outcome), 0)\n if count >= 1:\n run += 1\n else:\n run = 0\n return max(best_run, (run, outcome)), run\n\n def final_outcome(self, final_state):\n return final_state[0]\n \n def order(self, *_):\n return Order.Ascending\n \n # This guarantees that we see all consecutive integers.\n alignment = MultisetEvaluator.range_alignment\n\nbest_straight_evaluator = LargestStraightAndOutcomeEvaluator()\nprint(best_straight_evaluator(d6.pool(5)))", + "source": "from icepool import Order\n\nclass LargestStraightAndOutcomeEvaluator(MultisetEvaluator):\n\n def next_state(self, state, outcome, count):\n best_run, run = state or ((0, outcome), 0)\n if count >= 1:\n run += 1\n else:\n run = 0\n return max(best_run, (run, outcome)), run\n\n def final_outcome(self, final_state):\n return final_state[0]\n \n def order(self, *_):\n return Order.Ascending\n \n # This guarantees that we see all consecutive integers.\n extra_outcomes = 
MultisetEvaluator.consecutive\n\nbest_straight_evaluator = LargestStraightAndOutcomeEvaluator()\nprint(best_straight_evaluator(d6.pool(5)))", "metadata": { "tags": [], "trusted": true From 298635d5e2fc9e58c10b45df8e8df4cbeb584ec5 Mon Sep 17 00:00:00 2001 From: Albert Julius Liu Date: Fri, 20 Sep 2024 18:58:33 -0700 Subject: [PATCH 07/12] ability_scores.html to v1.6.0 --- apps/ability_scores.html | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/apps/ability_scores.html b/apps/ability_scores.html index 844e4e7f..6bb750a3 100644 --- a/apps/ability_scores.html +++ b/apps/ability_scores.html @@ -275,8 +275,6 @@

How does it work?