# standard library
import copy
from math import sqrt
from typing import Callable, Dict, List, Optional, Union
# 3rd-party
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import xarray as xr
from dask import delayed
from xarray import DataArray
try:
import cupy
except ImportError:
    # cupy is optional; this stub keeps `isinstance(x, cupy.ndarray)`
    # checks valid (always False) when cupy is not installed
    class cupy(object):
ndarray = False
# local modules
from xrspatial.utils import ArrayTypeFunctionMapping, ngjit, not_implemented_func, validate_arrays
TOTAL_COUNT = '_total_count'
def _stats_count(data):
if isinstance(data, np.ndarray):
# numpy case
stats_count = np.ma.count(data)
elif isinstance(data, cupy.ndarray):
# cupy case
stats_count = np.prod(data.shape)
else:
# dask case
stats_count = data.size - da.ma.getmaskarray(data).sum()
return stats_count
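# Illustration (hypothetical inputs, not from the test suite): for a masked
# numpy array only the unmasked cells are counted, while a plain ndarray
# counts every element:
#
#   np.ma.count(np.ma.masked_invalid(np.array([1., np.nan, 3.])))  # -> 2
#   np.ma.count(np.array([1., 2., 3.]))                            # -> 3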
_DEFAULT_STATS = dict(
mean=lambda z: z.mean(),
max=lambda z: z.max(),
min=lambda z: z.min(),
sum=lambda z: z.sum(),
std=lambda z: z.std(),
var=lambda z: z.var(),
count=lambda z: _stats_count(z),
)
_DASK_BLOCK_STATS = dict(
max=lambda z: z.max(),
min=lambda z: z.min(),
sum=lambda z: z.sum(),
count=lambda z: _stats_count(z),
sum_squares=lambda z: (z**2).sum()
)
_DASK_STATS = dict(
max=lambda block_maxes: np.nanmax(block_maxes, axis=0),
min=lambda block_mins: np.nanmin(block_mins, axis=0),
sum=lambda block_sums: np.nansum(block_sums, axis=0),
count=lambda block_counts: np.nansum(block_counts, axis=0),
sum_squares=lambda block_sum_squares: np.nansum(block_sum_squares, axis=0),
squared_sum=lambda block_sums: np.nansum(block_sums, axis=0)**2,
)
def _dask_mean(sums, counts): return sums / counts # noqa
def _dask_std(sum_squares, squared_sum, n): return np.sqrt((sum_squares - squared_sum/n) / n) # noqa
def _dask_var(sum_squares, squared_sum, n): return (sum_squares - squared_sum/n) / n # noqa
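# These helpers rely on the population-variance identity
#   var = (sum(x**2) - sum(x)**2 / n) / n = E[x**2] - E[x]**2
# which lets dask derive mean/std/var from per-chunk sums alone.
# Sanity check (hypothetical values): for x = np.array([1., 2., 3., 4.]),
#   _dask_var((x**2).sum(), x.sum()**2, x.size)  # -> 1.25 == np.var(x)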
@ngjit
def _strides(flatten_zones, unique_zones):
num_elements = flatten_zones.shape[0]
num_zones = len(unique_zones)
strides = np.zeros(len(unique_zones), dtype=np.int32)
count = 0
for i in range(num_zones):
while (count < num_elements) and (
flatten_zones[count] == unique_zones[i]):
count += 1
strides[i] = count
return strides
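# Illustration (hypothetical arrays): given a *sorted* flattened zones array,
# _strides returns the exclusive end offset of each zone's run:
#
#   _strides(np.array([0., 0., 1., 1., 1., 2.]), np.array([0., 1., 2.]))
#   # -> array([2, 5, 6], dtype=int32)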
def _sort_and_stride(zones, values, unique_zones):
flatten_zones = zones.ravel()
sorted_indices = np.argsort(flatten_zones)
sorted_zones = flatten_zones[sorted_indices]
values_shape = values.shape
if len(values_shape) == 3:
values_by_zones = copy.deepcopy(values).reshape(
values_shape[0], values_shape[1] * values_shape[2])
for i in range(values_shape[0]):
values_by_zones[i] = values_by_zones[i][sorted_indices]
else:
values_by_zones = values.ravel()[sorted_indices]
    # exclude NaNs from the calculation: flatten_zones is already sorted, so
    # NaN elements (if any) sit at the end of the array, and removing them
    # does not affect the data before them
sorted_zones = sorted_zones[np.isfinite(sorted_zones)]
zone_breaks = _strides(sorted_zones, unique_zones)
return sorted_indices, values_by_zones, zone_breaks
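# Worked example (hypothetical 2D inputs): for
#   zones  = np.array([[1., 0.], [0., 1.]])
#   values = np.array([[10., 20.], [30., 40.]])
#   unique_zones = np.array([0., 1.])
# _sort_and_stride returns sorted_indices [1, 2, 0, 3], values_by_zones
# [20., 30., 10., 40.], and zone_breaks [2, 4]: zone 0 owns the slice [0:2]
# and zone 1 owns the slice [2:4].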
def _calc_stats(
    values_by_zones: np.ndarray,
    zone_breaks: np.ndarray,
    unique_zones: np.ndarray,
    zone_ids: np.ndarray,
func: Callable,
nodata_values: Union[int, float] = None,
):
start = 0
results = np.full(unique_zones.shape, np.nan)
for i in range(len(unique_zones)):
end = zone_breaks[i]
if unique_zones[i] in zone_ids:
zone_values = values_by_zones[start:end]
# filter out non-finite and nodata_values
zone_values = zone_values[np.isfinite(zone_values) & (zone_values != nodata_values)]
if len(zone_values) > 0:
results[i] = func(zone_values)
start = end
return results
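# Continuing the example above (hypothetical data): with func=np.sum,
# both zones selected, and nodata_values left as None,
#   _calc_stats(np.array([20., 30., 10., 40.]), np.array([2, 4]),
#               np.array([0., 1.]), np.array([0., 1.]), np.sum)
#   # -> array([50., 50.])
# Zones omitted from zone_ids keep their NaN placeholder in the result.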
@delayed
def _single_stats_func(
    zones_block: np.ndarray,
    values_block: np.ndarray,
    unique_zones: np.ndarray,
    zone_ids: np.ndarray,
func: Callable,
nodata_values: Union[int, float] = None,
) -> pd.DataFrame:
_, values_by_zones, zone_breaks = _sort_and_stride(zones_block, values_block, unique_zones)
results = _calc_stats(values_by_zones, zone_breaks, unique_zones, zone_ids, func, nodata_values)
return results
def _stats_dask_numpy(
zones: da.Array,
values: da.Array,
zone_ids: List[Union[int, float]],
stats_funcs: Dict,
nodata_values: Union[int, float],
) -> pd.DataFrame:
# find ids for all zones
unique_zones = np.unique(zones[np.isfinite(zones)])
select_all_zones = False
    # select zones to do analysis
if zone_ids is None:
zone_ids = unique_zones
select_all_zones = True
zones_blocks = zones.to_delayed().ravel()
values_blocks = values.to_delayed().ravel()
stats_dict = {}
stats_dict["zone"] = unique_zones # zone column
compute_sum_squares = False
compute_sum = False
compute_count = False
    if 'mean' in stats_funcs or 'std' in stats_funcs or 'var' in stats_funcs:
        compute_sum = True
        compute_count = True
    if 'std' in stats_funcs or 'var' in stats_funcs:
        compute_sum_squares = True
basis_stats = [s for s in _DASK_BLOCK_STATS if s in stats_funcs]
if compute_count and 'count' not in basis_stats:
basis_stats.append('count')
if compute_sum and 'sum' not in basis_stats:
basis_stats.append('sum')
if compute_sum_squares:
basis_stats.append('sum_squares')
dask_dtypes = dict(
max=values.dtype,
min=values.dtype,
sum=values.dtype,
count=np.int64,
sum_squares=values.dtype,
squared_sum=values.dtype,
)
for s in basis_stats:
if s == 'sum_squares' and not compute_sum_squares:
continue
stats_func = _DASK_BLOCK_STATS.get(s)
        stats_by_block = [
            da.from_delayed(
                # _single_stats_func is already wrapped by @delayed above
                _single_stats_func(
                    z, v, unique_zones, zone_ids, stats_func, nodata_values
                ), shape=(np.nan,), dtype=dask_dtypes[s]
            )
            for z, v in zip(zones_blocks, values_blocks)
        ]
zonal_stats = da.stack(stats_by_block, allow_unknown_chunksizes=True)
stats_func_by_block = delayed(_DASK_STATS[s])
stats_dict[s] = da.from_delayed(
stats_func_by_block(zonal_stats), shape=(np.nan,), dtype=np.float64
)
if 'mean' in stats_funcs:
stats_dict['mean'] = _dask_mean(stats_dict['sum'], stats_dict['count'])
if 'std' in stats_funcs:
stats_dict['std'] = _dask_std(
stats_dict['sum_squares'], stats_dict['sum'] ** 2, stats_dict['count']
)
if 'var' in stats_funcs:
stats_dict['var'] = _dask_var(
stats_dict['sum_squares'], stats_dict['sum'] ** 2, stats_dict['count']
)
# generate dask dataframe
stats_df = dd.concat([dd.from_dask_array(s) for s in stats_dict.values()], axis=1)
# name columns
stats_df.columns = stats_dict.keys()
# select columns
stats_df = stats_df[['zone'] + list(stats_funcs.keys())]
if not select_all_zones:
# only return zones specified in `zone_ids`
        stats_df = stats_df[stats_df['zone'].isin(zone_ids)]
return stats_df
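# The dask path above is a two-stage map-reduce: every chunk computes the
# additive basis stats (min/max/sum/count/sum of squares) per zone,
# _DASK_STATS combines them across chunks, and mean/std/var are then derived
# from sum, count, and sum_squares, so no chunk ever needs another chunk's
# raw values. E.g. (hypothetical numbers) per-chunk sums [3., 7.] and counts
# [2., 2.] for one zone combine to mean = (3 + 7) / (2 + 2) = 2.5.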
def _stats_numpy(
zones: xr.DataArray,
values: xr.DataArray,
zone_ids: List[Union[int, float]],
stats_funcs: Dict,
nodata_values: Union[int, float],
return_type: str,
) -> Union[pd.DataFrame, np.ndarray]:
# find ids for all zones
unique_zones = np.unique(zones[np.isfinite(zones)])
    # select zones to do analysis
if zone_ids is None:
zone_ids = unique_zones
else:
zone_ids = np.unique(zone_ids)
# remove zones that do not exist in `zones` raster
zone_ids = [z for z in zone_ids if z in unique_zones]
sorted_indices, values_by_zones, zone_breaks = _sort_and_stride(zones, values, unique_zones)
if return_type == 'pandas.DataFrame':
stats_dict = {}
stats_dict["zone"] = zone_ids
selected_indexes = [i for i, z in enumerate(unique_zones) if z in zone_ids]
for stats in stats_funcs:
func = stats_funcs.get(stats)
stats_dict[stats] = _calc_stats(
values_by_zones, zone_breaks,
unique_zones, zone_ids, func, nodata_values
)
stats_dict[stats] = stats_dict[stats][selected_indexes]
result = pd.DataFrame(stats_dict)
else:
result = np.full((len(stats_funcs), values.size), np.nan)
zone_ids_map = {z: i for i, z in enumerate(unique_zones) if z in zone_ids}
stats_id = 0
for stats in stats_funcs:
func = stats_funcs.get(stats)
stats_results = _calc_stats(
values_by_zones, zone_breaks,
unique_zones, zone_ids, func, nodata_values
)
for zone in zone_ids:
iz = zone_ids_map[zone] # position of zone in unique_zones
if iz == 0:
zs = sorted_indices[: zone_breaks[iz]]
else:
zs = sorted_indices[zone_breaks[iz-1]: zone_breaks[iz]]
result[stats_id][zs] = stats_results[iz]
stats_id += 1
result = result.reshape(len(stats_funcs), *values.shape)
return result
def _stats_cupy(
orig_zones: xr.DataArray,
orig_values: xr.DataArray,
zone_ids: List[Union[int, float]],
stats_funcs: Dict,
nodata_values: Union[int, float],
) -> pd.DataFrame:
# TODO add support for 3D input
if len(orig_values.shape) > 2:
raise TypeError('3D inputs not supported for cupy backend')
zones = cupy.ravel(orig_zones)
values = cupy.ravel(orig_values)
sorted_indices = cupy.argsort(zones)
sorted_zones = zones[sorted_indices]
values_by_zone = values[sorted_indices]
    # filter out values that are non-finite or equal to nodata_values;
    # check against None explicitly so a nodata value of 0 is honored
    if nodata_values is not None:
filter_values = cupy.isfinite(values_by_zone) & (
values_by_zone != nodata_values)
else:
filter_values = cupy.isfinite(values_by_zone)
values_by_zone = values_by_zone[filter_values]
sorted_zones = sorted_zones[filter_values]
    # find the unique zones and the zone breaks
unique_zones, unique_index, unique_counts = cupy.unique(
sorted_zones, return_index=True, return_counts=True)
# Transfer to the host
unique_index = unique_index.get()
unique_counts = unique_counts.get()
unique_zones = unique_zones.get()
if zone_ids is not None:
# We need to extract the index and element count
# only for the elements in zone_ids
        unique_index_lst = []
        unique_counts_lst = []
        selected_zones = []
        unique_zones = list(unique_zones)
        for z in zone_ids:
            try:
                idx = unique_zones.index(z)
            except ValueError:
                # requested zone does not exist in the raster; skip it so
                # the zone, index, and count lists stay aligned
                continue
            selected_zones.append(z)
            unique_index_lst.append(unique_index[idx])
            unique_counts_lst.append(unique_counts[idx])
        unique_zones = selected_zones
        unique_counts = unique_counts_lst
        unique_index = unique_index_lst
# stats columns
stats_dict = {'zone': []}
for stats in stats_funcs:
stats_dict[stats] = []
for i in range(len(unique_zones)):
zone_id = unique_zones[i]
        # skip non-finite zone ids (e.g. NaN-encoded nodata zones)
if not np.isfinite(zone_id):
continue
stats_dict['zone'].append(zone_id)
# extract zone_values
zone_values = values_by_zone[unique_index[i]:unique_index[i]+unique_counts[i]]
# apply stats on the zone data
for j, stats in enumerate(stats_funcs):
stats_func = stats_funcs.get(stats)
            if not callable(stats_func):
                raise ValueError(f"stats_funcs['{stats}'] is not callable")
result = stats_func(zone_values)
            # each stats_func must reduce the zone's values to a scalar
            assert len(result.shape) == 0
stats_dict[stats].append(cupy.float_(result))
stats_df = pd.DataFrame(stats_dict)
stats_df.set_index("zone")
return stats_df
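# Grouping sketch (hypothetical data): after sorting, cupy.unique with
# return_index/return_counts gives, for sorted_zones [0, 0, 1, 1, 1]:
#   unique_zones  -> [0, 1]
#   unique_index  -> [0, 2]
#   unique_counts -> [2, 3]
# so each zone's values are the slice
# values_by_zone[unique_index[i]:unique_index[i] + unique_counts[i]].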
def stats(
zones: xr.DataArray,
values: xr.DataArray,
zone_ids: Optional[List[Union[int, float]]] = None,
stats_funcs: Union[Dict, List] = [
"mean",
"max",
"min",
"sum",
"std",
"var",
"count",
],
nodata_values: Union[int, float] = None,
return_type: str = 'pandas.DataFrame',
) -> Union[pd.DataFrame, dd.DataFrame, xr.DataArray]:
"""
    Calculate summary statistics for each zone defined by a `zones`
    dataset, based on a `values` aggregate.
    A single output value is computed for every zone in the input `zones`
    dataset.
    This function supports numpy-backed, cupy-backed, and dask with
    numpy-backed xarray DataArrays.
Parameters
----------
zones : xr.DataArray
zones is a 2D xarray DataArray of numeric values.
A zone is all the cells in a raster that have the same value,
whether or not they are contiguous. The input `zones` raster defines
        the shape, values, and locations of the zones. Each distinct value
        in the input `zones` DataArray defines a zone.
values : xr.DataArray
values is a 2D xarray DataArray of numeric values (integers or floats).
The input `values` raster contains the input values used in
        calculating the output statistic for each zone. In the dask case,
        the chunk sizes of `zones` and `values` should match; if not,
        `values` will be rechunked to match `zones`.
zone_ids : list of ints, or floats
        List of zones to be included in the calculation. If no `zone_ids`
        are provided, all zones will be used.
stats_funcs : dict, or list of strings, default=['mean', 'max', 'min',
'sum', 'std', 'var', 'count']
The statistics to calculate for each zone. If a list, possible
choices are subsets of the default options.
        In the dictionary case, all of its values must be callable. Each
        function takes a single argument, the `values` raster of a zone.
        The key becomes the column name in the output DataFrame.
Note that if `zones` and `values` are dask backed DataArrays,
`stats_funcs` must be provided as a list that is a subset of
default supported stats.
nodata_values: int, float, default=None
Nodata value in `values` raster.
        Cells with `nodata_values` do not belong to any zone,
        and are thus excluded from the calculation.
return_type: str, default='pandas.DataFrame'
        Format of the returned data. If `zones` and `values` are numpy-backed
        xarray DataArrays, allowed values are 'pandas.DataFrame' and
        'xarray.DataArray'. Otherwise, only 'pandas.DataFrame' is supported.
Returns
-------
    stats_df : Union[pandas.DataFrame, dask.dataframe.DataFrame, xarray.DataArray]
        A pandas DataFrame, or a dask DataFrame, where each column
        is a statistic and each row is a zone with its zone id. If
        `return_type` is 'xarray.DataArray', an array with one layer
        per statistic is returned instead.
Examples
--------
stats() works with NumPy backed DataArray
.. sourcecode:: python
>>> import numpy as np
>>> import xarray as xr
>>> from xrspatial.zonal import stats
>>> height, width = 10, 10
>>> values_data = np.array([
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
[50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
[60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
[70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
[80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
[90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
>>> values = xr.DataArray(values_data)
>>> zones_data = np.array([
[ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
[ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
[ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
[ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
[ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
[20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
[20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
[20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
[20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
[20., 20., 20., 20., 20., 30., 30., 30., 30., 30.]])
>>> zones = xr.DataArray(zones_data)
>>> # Calculate Stats
>>> stats_df = stats(zones=zones, values=values)
>>> print(stats_df)
zone mean max min sum std var count
0 0 22.0 44 0 550 14.21267 202.0 25
1 10 27.0 49 5 675 14.21267 202.0 25
2 20 72.0 94 50 1800 14.21267 202.0 25
3 30 77.0 99 55 1925 14.21267 202.0 25
>>> # Custom Stats
    >>> custom_stats = {'double_sum': lambda val: val.sum() * 2}
>>> custom_stats_df = stats(zones=zones,
values=values,
stats_funcs=custom_stats)
>>> print(custom_stats_df)
zone double_sum
0 0 1100
1 10 1350
2 20 3600
3 30 3850
    stats() works with Dask with NumPy backed DataArray
    .. sourcecode:: python
    >>> import dask.array as da
>>> values_dask = xr.DataArray(da.from_array(values_data, chunks=(3, 3)))
>>> zones_dask = xr.DataArray(da.from_array(zones_data, chunks=(3, 3)))
>>> # Calculate Stats with dask backed xarray DataArrays
>>> dask_stats_df = stats(zones=zones_dask, values=values_dask)
>>> print(type(dask_stats_df))
<class 'dask.dataframe.core.DataFrame'>
>>> print(dask_stats_df.compute())
zone mean max min sum std var count
0 0 22.0 44 0 550 14.21267 202.0 25
1 10 27.0 49 5 675 14.21267 202.0 25
2 20 72.0 94 50 1800 14.21267 202.0 25
3 30 77.0 99 55 1925 14.21267 202.0 25
"""
validate_arrays(zones, values)
if not (
issubclass(zones.data.dtype.type, np.integer)
or issubclass(zones.data.dtype.type, np.floating)
):
raise ValueError("`zones` must be an array of integers or floats.")
if not (
issubclass(values.data.dtype.type, np.integer)
or issubclass(values.data.dtype.type, np.floating)
):
raise ValueError("`values` must be an array of integers or floats.")
# validate stats_funcs
if isinstance(values.data, da.Array) and not isinstance(stats_funcs, list):
raise ValueError(
"Got dask-backed DataArray as `values` aggregate. "
"`stats_funcs` must be a subset of default supported stats "
"`[\'mean\', \'max\', \'min\', \'sum\', \'std\', \'var\', \'count\']`"
)
if isinstance(stats_funcs, list):
# create a dict of stats
stats_funcs_dict = {}
for stats in stats_funcs:
func = _DEFAULT_STATS.get(stats, None)
if func is None:
err_str = f"Invalid stat name. {stats} option not supported."
raise ValueError(err_str)
stats_funcs_dict[stats] = func
elif isinstance(stats_funcs, dict):
stats_funcs_dict = stats_funcs.copy()
mapper = ArrayTypeFunctionMapping(
numpy_func=lambda *args: _stats_numpy(*args, return_type=return_type),
dask_func=_stats_dask_numpy,
cupy_func=_stats_cupy,
dask_cupy_func=lambda *args: not_implemented_func(
*args, messages='stats() does not support dask with cupy backed DataArray' # noqa
),
)
result = mapper(values)(
zones.data, values.data, zone_ids, stats_funcs_dict, nodata_values,
)
if return_type == 'xarray.DataArray':
return xr.DataArray(
result,
coords={'stats': list(stats_funcs_dict.keys()), **values.coords},
dims=('stats', *values.dims),
attrs=values.attrs
)
return result
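# A hedged usage sketch for the array return type (reusing the `zones` and
# `values` DataArrays from the docstring above): one 2D layer is produced per
# requested stat, aligned cell-for-cell with `values`:
#
#   agg = stats(zones=zones, values=values, stats_funcs=['mean'],
#               return_type='xarray.DataArray')
#   agg.shape              # -> (1, 10, 10)
#   agg.sel(stats='mean')  # each cell holds the mean of that cell's zone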
def _find_cats(values, cat_ids, nodata_values):
if len(values.shape) == 2:
# 2D case
unique_cats = np.unique(values.data[
np.isfinite(values.data) & (values.data != nodata_values)
])
else:
# 3D case
unique_cats = values[values.dims[0]].data
if cat_ids is None:
cat_ids = unique_cats
else:
if isinstance(values.data, np.ndarray):
# remove cats that do not exist in `values` raster
cat_ids = [c for c in cat_ids if c in unique_cats]
else:
cat_ids = _select_ids(unique_cats, cat_ids)
return unique_cats, cat_ids
def _get_zone_values(values_by_zones, start, end):
if len(values_by_zones.shape) == 1:
        # 1D flattened, i.e., original data is 2D
return values_by_zones[start:end]
else:
        # 2D flattened, i.e., original data is 3D
return values_by_zones[:, start:end]
def _single_zone_crosstab_2d(
zone_values,
unique_cats,
cat_ids,
nodata_values,
crosstab_dict,
):
    # 1D flattened zone_values, i.e., original data is 2D
# filter out non-finite and nodata_values
zone_values = zone_values[
np.isfinite(zone_values) & (zone_values != nodata_values)
]
total_count = zone_values.shape[0]
crosstab_dict[TOTAL_COUNT].append(total_count)
sorted_zone_values = np.sort(zone_values)
zone_cat_breaks = _strides(sorted_zone_values, unique_cats)
cat_start = 0
for j, cat in enumerate(unique_cats):
if cat in cat_ids:
count = zone_cat_breaks[j] - cat_start
crosstab_dict[cat].append(count)
cat_start = zone_cat_breaks[j]
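# Counting illustration (hypothetical data): for zone_values
# [0., 10., 10., 20.] and unique_cats [0., 10., 20., 30.], sorting plus
# _strides yields TOTAL_COUNT 4 and per-category counts 1, 2, 1, 0.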
def _single_zone_crosstab_3d(
zone_values,
unique_cats,
cat_ids,
nodata_values,
crosstab_dict,
stats_func
):
    # 2D flattened `zone_values`, i.e., original data is 3D
for j, cat in enumerate(unique_cats):
if cat in cat_ids:
zone_cat_data = zone_values[j]
# filter out non-finite and nodata_values
zone_cat_data = zone_cat_data[
np.isfinite(zone_cat_data)
& (zone_cat_data != nodata_values)
]
crosstab_dict[cat].append(stats_func(zone_cat_data))
def _crosstab_numpy(
zones: np.ndarray,
values: np.ndarray,
zone_ids: List[Union[int, float]],
unique_cats: np.ndarray,
cat_ids: Union[List, np.ndarray],
nodata_values: Union[int, float],
agg: str,
) -> pd.DataFrame:
# find ids for all zones
unique_zones = np.unique(zones[np.isfinite(zones)])
    # select zones to do analysis
if zone_ids is None:
zone_ids = unique_zones
else:
# remove zones that do not exist in `zones` raster
zone_ids = [z for z in zone_ids if z in unique_zones]
crosstab_dict = {}
crosstab_dict["zone"] = zone_ids
if len(values.shape) == 2:
crosstab_dict[TOTAL_COUNT] = []
for cat in cat_ids:
crosstab_dict[cat] = []
_, values_by_zones, zone_breaks = _sort_and_stride(
zones, values, unique_zones
)
start = 0
for i in range(len(unique_zones)):
end = zone_breaks[i]
if unique_zones[i] in zone_ids:
# get data for zone unique_zones[i]
zone_values = _get_zone_values(values_by_zones, start, end)
if len(values.shape) == 2:
_single_zone_crosstab_2d(
zone_values, unique_cats, cat_ids, nodata_values, crosstab_dict
)
else:
_single_zone_crosstab_3d(
zone_values, unique_cats, cat_ids, nodata_values, crosstab_dict, _DEFAULT_STATS[agg] # noqa
)
start = end
if TOTAL_COUNT in crosstab_dict:
crosstab_dict[TOTAL_COUNT] = np.array(
crosstab_dict[TOTAL_COUNT], dtype=np.float32
)
for cat in cat_ids:
crosstab_dict[cat] = np.array(crosstab_dict[cat])
# construct output dataframe
if agg == 'percentage':
        # replace 0s with NaNs to avoid division-by-zero errors
crosstab_dict[TOTAL_COUNT][crosstab_dict[TOTAL_COUNT] == 0] = np.nan
for cat in cat_ids:
crosstab_dict[cat] = crosstab_dict[cat] / crosstab_dict[TOTAL_COUNT] * 100 # noqa
crosstab_df = pd.DataFrame(crosstab_dict)
crosstab_df = crosstab_df[['zone'] + list(cat_ids)]
return crosstab_df
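# Percentage illustration (hypothetical counts): a zone with TOTAL_COUNT 4
# and category counts {0.0: 1, 10.0: 2, 20.0: 1} yields 25.0, 50.0, and 25.0;
# the denominator counts every finite, non-nodata cell in the zone, so
# narrowing `cat_ids` does not inflate the remaining percentages.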
@delayed
def _single_chunk_crosstab(
    zones_block: np.ndarray,
    values_block: np.ndarray,
    unique_zones: np.ndarray,
    zone_ids: np.ndarray,
unique_cats,
cat_ids,
nodata_values: Union[int, float],
):
results = {}
if len(values_block.shape) == 2:
results[TOTAL_COUNT] = []
for cat in cat_ids:
results[cat] = []
_, values_by_zones, zone_breaks = _sort_and_stride(
zones_block, values_block, unique_zones
)
start = 0
for i in range(len(unique_zones)):
end = zone_breaks[i]
if unique_zones[i] in zone_ids:
# get data for zone unique_zones[i]
zone_values = _get_zone_values(values_by_zones, start, end)
if len(values_block.shape) == 2:
_single_zone_crosstab_2d(
zone_values, unique_cats, cat_ids, nodata_values, results
)
else:
_single_zone_crosstab_3d(
zone_values, unique_cats, cat_ids, nodata_values, results, _DEFAULT_STATS['count'] # noqa
)
start = end
if TOTAL_COUNT in results:
results[TOTAL_COUNT] = np.array(results[TOTAL_COUNT], dtype=np.float32)
for cat in cat_ids:
results[cat] = np.array(results[cat])
return results
@delayed
def _select_ids(unique_ids, ids):
selected_ids = []
for i in ids:
if i in unique_ids:
selected_ids.append(i)
return selected_ids
@delayed
def _crosstab_df_dask(crosstab_by_block, zone_ids, cat_ids, agg):
result = crosstab_by_block[0]
for i in range(1, len(crosstab_by_block)):
for k in crosstab_by_block[i]:
result[k] += crosstab_by_block[i][k]
if agg == 'percentage':
        # replace 0s with NaNs to avoid division-by-zero errors
result[TOTAL_COUNT][result[TOTAL_COUNT] == 0] = np.nan
for cat in cat_ids:
result[cat] = result[cat] / result[TOTAL_COUNT] * 100
df = pd.DataFrame(result)
df['zone'] = zone_ids
columns = ['zone'] + list(cat_ids)
df = df[columns]
return df
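# The per-block crosstab dicts hold raw, additive counts per category, so the
# cross-chunk reduction above is a plain key-wise sum; percentages are only
# derived after all counts have been combined, keeping the result identical
# to the single-chunk numpy path.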
def _crosstab_dask_numpy(
    zones: da.Array,
    values: da.Array,
zone_ids: List[Union[int, float]],
unique_cats: np.ndarray,
cat_ids: Union[List, np.ndarray],
nodata_values: Union[int, float],
agg: str,
):
# find ids for all zones
unique_zones = np.unique(zones[np.isfinite(zones)])
if zone_ids is None:
zone_ids = unique_zones
else:
zone_ids = _select_ids(unique_zones, zone_ids)
cat_ids = _select_ids(unique_cats, cat_ids)
zones_blocks = zones.to_delayed().ravel()
values_blocks = values.to_delayed().ravel()
crosstab_by_block = [
_single_chunk_crosstab(
z, v, unique_zones, zone_ids,
unique_cats, cat_ids, nodata_values
)
for z, v in zip(zones_blocks, values_blocks)
]
crosstab_df = _crosstab_df_dask(
crosstab_by_block, zone_ids, cat_ids, agg
)
return dd.from_delayed(crosstab_df)
def crosstab(
zones: xr.DataArray,
values: xr.DataArray,
zone_ids: List[Union[int, float]] = None,
cat_ids: List[Union[int, float]] = None,
layer: Optional[int] = None,
agg: Optional[str] = "count",
nodata_values: Optional[Union[int, float]] = None,
) -> Union[pd.DataFrame, dd.DataFrame]:
"""
    Calculate cross-tabulated (categorical) statistics
    between two datasets: a zone dataset `zones` and a value dataset `values`
    (a value raster). Infinite and NaN values in `zones` and `values` will
    be ignored.
Outputs a pandas DataFrame if `zones` and `values` are numpy backed.
Outputs a dask DataFrame if `zones` and `values` are dask with
numpy-backed xarray DataArrays.
Requires a DataArray with a single data dimension, here called the
"values", indexed using either 2D or 3D coordinates.
DataArrays with 3D coordinates are expected to contain values
distributed over different categories that are indexed by the
additional coordinate. Such an array would reduce to the
2D-coordinate case if collapsed across the categories (e.g. if one
did ``aggc.sum(dim='cat')`` for a categorical dimension ``cat``).
Parameters
----------
zones : xr.DataArray
2D data array of integers or floats.
A zone is all the cells in a raster that have the same value,
whether or not they are contiguous. The input `zones` raster defines
        the shape, values, and locations of the zones. A unique field
        in the zone input defines the zones.
values : xr.DataArray
2D or 3D data array of integers or floats.
The input value raster contains the input values used in
calculating the categorical statistic for each zone.
zone_ids: List of ints, or floats
        List of zones to be included in the calculation. If no `zone_ids`
        are provided, all zones will be used.
cat_ids: List of ints, or floats
        List of categories to be included in the calculation.
        If no `cat_ids` are provided, all categories will be used.
    layer: int, default=None
        Index of the categorical dimension layer inside the `values` DataArray.
agg: str, default = 'count'
Aggregation method.
If the `values` data is 2D, available options are: `percentage`, and `count`.
        If `values` is 3D and numpy-backed, available options are:
        `min`, `max`, `mean`, `sum`, `std`, `var`, and `count`.
        If `values` is 3D and backed by dask with numpy, the only
        available option is `count`.
nodata_values: int, float, default=None
        Nodata value in the `values` raster.
        Cells with `nodata_values` do not belong to any zone,
        and are thus excluded from the calculation.
Returns
-------
crosstab_df : Union[pandas.DataFrame, dask.dataframe.DataFrame]
        A pandas DataFrame, or an uncomputed dask DataFrame,
        where each column is a categorical value and each row is a zone
        with its zone id. Each entry is the statistic of that category
        over the zone, computed with the specified aggregation method.
Examples
--------
crosstab() works with NumPy backed DataArray.
.. sourcecode:: python
>>> import numpy as np
>>> import xarray as xr
>>> from xrspatial.zonal import crosstab
>>> values_data = np.asarray([
[0, 0, 10, 20],
[0, 0, 0, 10],
[0, np.nan, 20, 50],
[10, 30, 40, np.inf],
[10, 10, 50, 0]])
>>> values = xr.DataArray(values_data)
>>> zones_data = np.asarray([
[1, 1, 6, 6],
[1, np.nan, 6, 6],
[3, 5, 6, 6],
[3, 5, 7, np.nan],
[3, 7, 7, 0]])
>>> zones = xr.DataArray(zones_data)
>>> # Calculate Crosstab, numpy case
>>> df = crosstab(zones=zones, values=values)
>>> print(df)
zone 0.0 10.0 20.0 30.0 40.0 50.0
0 0 1 0 0 0 0 0
1 1 3 0 0 0 0 0
2 3 1 2 0 0 0 0
3 5 0 0 0 1 0 0
4 6 1 2 2 0 0 1
5 7 0 1 0 0 1 1
crosstab() works with Dask with NumPy backed DataArray.
.. sourcecode:: python
>>> import dask.array as da
>>> values_dask = xr.DataArray(da.from_array(values_data, chunks=(3, 3)))
>>> zones_dask = xr.DataArray(da.from_array(zones_data, chunks=(3, 3)))
>>> dask_df = crosstab(zones=zones_dask, values=values_dask)
>>> print(dask_df)
Dask DataFrame Structure:
zone 0.0 10.0 20.0 30.0 40.0 50.0
npartitions=5
0 float64 int64 int64 int64 int64 int64 int64
1 ... ... ... ... ... ... ...
... ... ... ... ... ... ... ...
4 ... ... ... ... ... ... ...
5 ... ... ... ... ... ... ...
Dask Name: astype, 1186 tasks
>>> print(dask_df.compute())
zone 0.0 10.0 20.0 30.0 40.0 50.0
0 0 1 0 0 0 0 0
1 1 3 0 0 0 0 0
2 3 1 2 0 0 0 0
3 5 0 0 0 1 0 0
4 6 1 2 2 0 0 1
5 7 0 1 0 0 1 1
"""
if not isinstance(zones, xr.DataArray):
raise TypeError("zones must be instance of DataArray")
if not isinstance(values, xr.DataArray):
raise TypeError("values must be instance of DataArray")
if zones.ndim != 2:
raise ValueError("zones must be 2D")
if not (
issubclass(zones.data.dtype.type, np.integer)
or issubclass(zones.data.dtype.type, np.floating)
):
raise ValueError("`zones` must be an xarray of integers or floats")
if not issubclass(values.data.dtype.type, np.integer) and not issubclass(
values.data.dtype.type, np.floating
):
raise ValueError("`values` must be an xarray of integers or floats")
if values.ndim not in [2, 3]: