|
/*
Copyright 2014 Workiva, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
| 16 | + |
| 17 | +package bitarray |
| 18 | + |
| 19 | +func nandSparseWithSparseBitArray(sba, other *sparseBitArray) BitArray { |
| 20 | + // nand is an operation on the incoming array only, so the size will never |
| 21 | + // be more than the incoming array, regardless of the size of the other |
| 22 | + max := len(sba.indices) |
| 23 | + indices := make(uintSlice, 0, max) |
| 24 | + blocks := make(blocks, 0, max) |
| 25 | + |
| 26 | + selfIndex := 0 |
| 27 | + otherIndex := 0 |
| 28 | + var resultBlock block |
| 29 | + |
| 30 | + // move through the array and compare the blocks if they happen to |
| 31 | + // intersect |
| 32 | + for { |
| 33 | + if selfIndex == len(sba.indices) { |
| 34 | + // The bitarray being operated on is exhausted, so just return |
| 35 | + break |
| 36 | + } else if otherIndex == len(other.indices) { |
| 37 | + // The other array is exhausted. In this case, we assume that we |
| 38 | + // are calling nand on empty bit arrays, which is the same as just |
| 39 | + // copying the value in the sba array |
| 40 | + indices = append(indices, sba.indices[selfIndex]) |
| 41 | + blocks = append(blocks, sba.blocks[selfIndex]) |
| 42 | + selfIndex++ |
| 43 | + continue |
| 44 | + } |
| 45 | + |
| 46 | + selfValue := sba.indices[selfIndex] |
| 47 | + otherValue := other.indices[otherIndex] |
| 48 | + |
| 49 | + switch { |
| 50 | + case otherValue < selfValue: |
| 51 | + // The `sba` bitarray has a block with a index position |
| 52 | + // greater than us. We want to compare with that block |
| 53 | + // if possible, so move our `other` index closer to that |
| 54 | + // block's index. |
| 55 | + otherIndex++ |
| 56 | + |
| 57 | + case otherValue > selfValue: |
| 58 | + // Here, the sba array has blocks that the other array doesn't |
| 59 | + // have. In this case, we just copy exactly the sba array values |
| 60 | + indices = append(indices, selfValue) |
| 61 | + blocks = append(blocks, sba.blocks[selfIndex]) |
| 62 | + |
| 63 | + // This is the exact logical inverse of the above case. |
| 64 | + selfIndex++ |
| 65 | + |
| 66 | + default: |
| 67 | + // Here, our indices match for both `sba` and `other`. |
| 68 | + // Time to do the bitwise AND operation and add a block |
| 69 | + // to our result list if the block has values in it. |
| 70 | + resultBlock = sba.blocks[selfIndex].nand(other.blocks[otherIndex]) |
| 71 | + if resultBlock > 0 { |
| 72 | + indices = append(indices, selfValue) |
| 73 | + blocks = append(blocks, resultBlock) |
| 74 | + } |
| 75 | + selfIndex++ |
| 76 | + otherIndex++ |
| 77 | + } |
| 78 | + } |
| 79 | + |
| 80 | + return &sparseBitArray{ |
| 81 | + indices: indices, |
| 82 | + blocks: blocks, |
| 83 | + } |
| 84 | +} |
| 85 | + |
| 86 | +func nandSparseWithDenseBitArray(sba *sparseBitArray, other *bitArray) BitArray { |
| 87 | + // Since nand is non-commutative, the resulting array should be sparse, |
| 88 | + // and the same length or less than the sparse array |
| 89 | + indices := make(uintSlice, 0, len(sba.indices)) |
| 90 | + blocks := make(blocks, 0, len(sba.indices)) |
| 91 | + |
| 92 | + var resultBlock block |
| 93 | + |
| 94 | + // Loop through the sparse array and match it with the dense array. |
| 95 | + for selfIndex, selfValue := range sba.indices { |
| 96 | + if selfValue >= uint64(len(other.blocks)) { |
| 97 | + // Since the dense array is exhausted, just copy over the data |
| 98 | + // from the sparse array |
| 99 | + resultBlock = sba.blocks[selfIndex] |
| 100 | + indices = append(indices, selfValue) |
| 101 | + blocks = append(blocks, resultBlock) |
| 102 | + continue |
| 103 | + } |
| 104 | + |
| 105 | + resultBlock = sba.blocks[selfIndex].nand(other.blocks[selfValue]) |
| 106 | + if resultBlock > 0 { |
| 107 | + indices = append(indices, selfValue) |
| 108 | + blocks = append(blocks, resultBlock) |
| 109 | + } |
| 110 | + } |
| 111 | + |
| 112 | + return &sparseBitArray{ |
| 113 | + indices: indices, |
| 114 | + blocks: blocks, |
| 115 | + } |
| 116 | +} |
| 117 | + |
| 118 | +func nandDenseWithSparseBitArray(sba *bitArray, other *sparseBitArray) BitArray { |
| 119 | + // Since nand is non-commutative, the resulting array should be dense, |
| 120 | + // and the same length or less than the dense array |
| 121 | + tmp := sba.copy() |
| 122 | + ret := tmp.(*bitArray) |
| 123 | + |
| 124 | + // Loop through the other array and match it with the sba array. |
| 125 | + for otherIndex, otherValue := range other.indices { |
| 126 | + if otherValue >= uint64(len(ret.blocks)) { |
| 127 | + break |
| 128 | + } |
| 129 | + |
| 130 | + ret.blocks[otherValue] = sba.blocks[otherValue].nand(other.blocks[otherIndex]) |
| 131 | + } |
| 132 | + |
| 133 | + ret.setLowest() |
| 134 | + ret.setHighest() |
| 135 | + |
| 136 | + return ret |
| 137 | +} |
| 138 | + |
| 139 | +func nandDenseWithDenseBitArray(dba, other *bitArray) BitArray { |
| 140 | + min := uint64(len(dba.blocks)) |
| 141 | + |
| 142 | + ba := newBitArray(min * s) |
| 143 | + |
| 144 | + for i := uint64(0); i < min; i++ { |
| 145 | + ba.blocks[i] = dba.blocks[i].nand(other.blocks[i]) |
| 146 | + } |
| 147 | + |
| 148 | + ba.setLowest() |
| 149 | + ba.setHighest() |
| 150 | + |
| 151 | + return ba |
| 152 | +} |
0 commit comments