// parallel.go implements parallel aggregations (ParHeapOr, ParAnd, ParOr)
// over roaring bitmaps.
  1. package roaring
  2. import (
  3. "container/heap"
  4. "fmt"
  5. "runtime"
  6. "sync"
  7. )
// defaultWorkerCount is the worker count used when a caller passes
// parallelism == 0: one worker per logical CPU.
var defaultWorkerCount = runtime.NumCPU()
// bitmapContainerKey identifies one container of a bitmap while it sits on
// the merge heap: the container's key, its index inside the owning bitmap,
// and the bitmap itself.
type bitmapContainerKey struct {
	key    uint16
	idx    int
	bitmap *Bitmap
}
// multipleContainers bundles all containers that share one key across the
// input bitmaps, plus the output slot index of the merged result
// (-1 until assigned by the dispatcher, see ParHeapOr).
type multipleContainers struct {
	key        uint16
	containers []container
	idx        int
}
// keyedContainer pairs a single result container with its key and the output
// slot index where appenderRoutine must place it.
type keyedContainer struct {
	key       uint16
	container container
	idx       int
}
  24. type bitmapContainerHeap []bitmapContainerKey
  25. func (h bitmapContainerHeap) Len() int { return len(h) }
  26. func (h bitmapContainerHeap) Less(i, j int) bool { return h[i].key < h[j].key }
  27. func (h bitmapContainerHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
  28. func (h *bitmapContainerHeap) Push(x interface{}) {
  29. // Push and Pop use pointer receivers because they modify the slice's length,
  30. // not just its contents.
  31. *h = append(*h, x.(bitmapContainerKey))
  32. }
  33. func (h *bitmapContainerHeap) Pop() interface{} {
  34. old := *h
  35. n := len(old)
  36. x := old[n-1]
  37. *h = old[0 : n-1]
  38. return x
  39. }
  40. func (h bitmapContainerHeap) Peek() bitmapContainerKey {
  41. return h[0]
  42. }
// popIncrementing yields the smallest-keyed container currently on the heap.
// Instead of removing the bitmap's heap entry outright, it advances that
// bitmap's cursor to its next container and re-heapifies; the entry is only
// truly popped when the bitmap has no containers left.
func (h *bitmapContainerHeap) popIncrementing() (key uint16, container container) {
	k := h.Peek()
	key = k.key
	container = k.bitmap.highlowcontainer.containers[k.idx]

	newIdx := k.idx + 1
	if newIdx < k.bitmap.highlowcontainer.size() {
		// Replace the root in place with the bitmap's next container, then
		// restore the heap invariant with Fix (cheaper than Pop followed by Push).
		k = bitmapContainerKey{
			k.bitmap.highlowcontainer.keys[newIdx],
			newIdx,
			k.bitmap,
		}
		(*h)[0] = k
		heap.Fix(h, 0)
	} else {
		heap.Pop(h)
	}
	return
}
  61. func (h *bitmapContainerHeap) Next(containers []container) multipleContainers {
  62. if h.Len() == 0 {
  63. return multipleContainers{}
  64. }
  65. key, container := h.popIncrementing()
  66. containers = append(containers, container)
  67. for h.Len() > 0 && key == h.Peek().key {
  68. _, container = h.popIncrementing()
  69. containers = append(containers, container)
  70. }
  71. return multipleContainers{
  72. key,
  73. containers,
  74. -1,
  75. }
  76. }
  77. func newBitmapContainerHeap(bitmaps ...*Bitmap) bitmapContainerHeap {
  78. // Initialize heap
  79. var h bitmapContainerHeap = make([]bitmapContainerKey, 0, len(bitmaps))
  80. for _, bitmap := range bitmaps {
  81. if !bitmap.IsEmpty() {
  82. key := bitmapContainerKey{
  83. bitmap.highlowcontainer.keys[0],
  84. 0,
  85. bitmap,
  86. }
  87. h = append(h, key)
  88. }
  89. }
  90. heap.Init(&h)
  91. return h
  92. }
  93. func repairAfterLazy(c container) container {
  94. switch t := c.(type) {
  95. case *bitmapContainer:
  96. if t.cardinality == invalidCardinality {
  97. t.computeCardinality()
  98. }
  99. if t.getCardinality() <= arrayDefaultMaxSize {
  100. return t.toArrayContainer()
  101. } else if c.(*bitmapContainer).isFull() {
  102. return newRunContainer16Range(0, MaxUint16)
  103. }
  104. }
  105. return c
  106. }
  107. func toBitmapContainer(c container) container {
  108. switch t := c.(type) {
  109. case *arrayContainer:
  110. return t.toBitmapContainer()
  111. case *runContainer16:
  112. if !t.isFull() {
  113. return t.toBitmapContainer()
  114. }
  115. }
  116. return c
  117. }
// appenderRoutine collects per-key result containers from resultChan,
// storing each at its slot index, until the number of results received
// matches the total announced on expectedKeysChan. It then assembles the
// final Bitmap in key order and delivers it on bitmapChan.
func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer, expectedKeysChan <-chan int) {
	// -1 means "total not yet known"; the dispatcher sends the real count on
	// expectedKeysChan once all work has been handed out.
	expectedKeys := -1
	appendedKeys := 0
	var keys []uint16
	var containers []container
	for appendedKeys != expectedKeys {
		select {
		case item := <-resultChan:
			// Results arrive in arbitrary order; grow the slot slices so that
			// item.idx is addressable, then store by index.
			if len(keys) <= item.idx {
				keys = append(keys, make([]uint16, item.idx-len(keys)+1)...)
				containers = append(containers, make([]container, item.idx-len(containers)+1)...)
			}
			keys[item.idx] = item.key
			containers[item.idx] = item.container
			appendedKeys++
		case msg := <-expectedKeysChan:
			expectedKeys = msg
		}
	}
	answer := &Bitmap{
		roaringArray{
			make([]uint16, 0, expectedKeys),
			make([]container, 0, expectedKeys),
			make([]bool, 0, expectedKeys),
			false,
		},
	}
	for i := range keys {
		if containers[i] != nil { // in case a resulting container was empty, see ParAnd function
			answer.highlowcontainer.appendContainer(keys[i], containers[i], false)
		}
	}
	bitmapChan <- answer
}
// ParHeapOr computes the union (OR) of all provided bitmaps in parallel,
// where the parameter "parallelism" determines how many workers are to be used
// (if it is set to 0, a default number of workers is chosen)
// ParHeapOr uses a heap to compute the union. For rare cases it might be faster than ParOr
func ParHeapOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
	bitmapCount := len(bitmaps)
	if bitmapCount == 0 {
		return NewBitmap()
	} else if bitmapCount == 1 {
		return bitmaps[0].Clone()
	}

	if parallelism == 0 {
		parallelism = defaultWorkerCount
	}

	h := newBitmapContainerHeap(bitmaps...)

	bitmapChan := make(chan *Bitmap)
	inputChan := make(chan multipleContainers, 128)
	resultChan := make(chan keyedContainer, 32)
	expectedKeysChan := make(chan int)

	// Reuse container slices across work items to cut allocations; each slice
	// can hold one container per input bitmap.
	pool := sync.Pool{
		New: func() interface{} {
			return make([]container, 0, len(bitmaps))
		},
	}

	orFunc := func() {
		// Assumes only structs with >=2 containers are passed
		for input := range inputChan {
			c := toBitmapContainer(input.containers[0]).lazyOR(input.containers[1])
			for _, next := range input.containers[2:] {
				c = c.lazyIOR(next)
			}
			// Lazy ORs leave cardinality invalid; fix up before publishing.
			c = repairAfterLazy(c)
			kx := keyedContainer{
				input.key,
				c,
				input.idx,
			}
			resultChan <- kx
			pool.Put(input.containers[:0])
		}
	}

	go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)

	for i := 0; i < parallelism; i++ {
		go orFunc()
	}

	// Dispatch: idx is the output slot of each key group, in ascending key order.
	idx := 0
	for h.Len() > 0 {
		ck := h.Next(pool.Get().([]container))
		if len(ck.containers) == 1 {
			// Only one bitmap holds this key: no OR needed, forward directly.
			resultChan <- keyedContainer{
				ck.key,
				ck.containers[0],
				idx,
			}
			pool.Put(ck.containers[:0])
		} else {
			ck.idx = idx
			inputChan <- ck
		}
		idx++
	}
	// Tell the appender how many results to expect, then wait for the bitmap.
	expectedKeysChan <- idx

	bitmap := <-bitmapChan

	close(inputChan)
	close(resultChan)
	close(expectedKeysChan)

	return bitmap
}
// ParAnd computes the intersection (AND) of all provided bitmaps in parallel,
// where the parameter "parallelism" determines how many workers are to be used
// (if it is set to 0, a default number of workers is chosen)
func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap {
	bitmapCount := len(bitmaps)
	if bitmapCount == 0 {
		return NewBitmap()
	} else if bitmapCount == 1 {
		return bitmaps[0].Clone()
	}

	if parallelism == 0 {
		parallelism = defaultWorkerCount
	}

	h := newBitmapContainerHeap(bitmaps...)

	bitmapChan := make(chan *Bitmap)
	inputChan := make(chan multipleContainers, 128)
	resultChan := make(chan keyedContainer, 32)
	expectedKeysChan := make(chan int)

	andFunc := func() {
		// Assumes only structs with >=2 containers are passed
		for input := range inputChan {
			c := input.containers[0].and(input.containers[1])
			for _, next := range input.containers[2:] {
				if c.isEmpty() {
					// Already empty: further ANDs cannot add bits back.
					break
				}
				c = c.iand(next)
			}

			// Send a nil explicitly if the result of the intersection is an empty container
			if c.isEmpty() {
				c = nil
			}
			kx := keyedContainer{
				input.key,
				c,
				input.idx,
			}
			resultChan <- kx
		}
	}

	go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)

	for i := 0; i < parallelism; i++ {
		go andFunc()
	}

	idx := 0
	for h.Len() > 0 {
		ck := h.Next(make([]container, 0, 4))
		// A key can only survive the intersection if every bitmap has it.
		if len(ck.containers) == bitmapCount {
			ck.idx = idx
			inputChan <- ck
			idx++
		}
	}
	// Announce the expected result count, then wait for the assembled bitmap.
	expectedKeysChan <- idx
	bitmap := <-bitmapChan

	close(inputChan)
	close(resultChan)
	close(expectedKeysChan)

	return bitmap
}
// ParOr computes the union (OR) of all provided bitmaps in parallel,
// where the parameter "parallelism" determines how many workers are to be used
// (if it is set to 0, a default number of workers is chosen)
func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
	var lKey uint16 = MaxUint16
	var hKey uint16

	// Drop empty bitmaps in place (reuses the caller's slice backing array).
	bitmapsFiltered := bitmaps[:0]
	for _, b := range bitmaps {
		if !b.IsEmpty() {
			bitmapsFiltered = append(bitmapsFiltered, b)
		}
	}
	bitmaps = bitmapsFiltered

	// Determine the overall [lKey, hKey] container-key range.
	for _, b := range bitmaps {
		lKey = minOfUint16(lKey, b.highlowcontainer.keys[0])
		hKey = maxOfUint16(hKey, b.highlowcontainer.keys[b.highlowcontainer.size()-1])
	}

	// lKey untouched and hKey zero means no non-empty bitmap was seen.
	if lKey == MaxUint16 && hKey == 0 {
		return New()
	} else if len(bitmaps) == 1 {
		return bitmaps[0].Clone()
	}

	keyRange := int(hKey) - int(lKey) + 1
	if keyRange == 1 {
		// revert to FastOr. Since the key range is 0
		// no container-level aggregation parallelism is achievable
		return FastOr(bitmaps...)
	}

	if parallelism == 0 {
		parallelism = defaultWorkerCount
	}

	// Split the key range into chunks; aim for 4 chunks per worker so load
	// balances even when chunks differ in cost, but never exceed one chunk
	// per key.
	var chunkSize int
	var chunkCount int
	if parallelism*4 > int(keyRange) {
		chunkSize = 1
		chunkCount = int(keyRange)
	} else {
		chunkCount = parallelism * 4
		chunkSize = (int(keyRange) + chunkCount - 1) / chunkCount
	}

	if chunkCount*chunkSize < int(keyRange) {
		// it's fine to panic to indicate an implementation error
		panic(fmt.Sprintf("invariant check failed: chunkCount * chunkSize < keyRange, %d * %d < %d", chunkCount, chunkSize, keyRange))
	}

	chunks := make([]*roaringArray, chunkCount)

	chunkSpecChan := make(chan parChunkSpec, minOfInt(maxOfInt(64, 2*parallelism), int(chunkCount)))
	chunkChan := make(chan parChunk, minOfInt(32, int(chunkCount)))

	orFunc := func() {
		for spec := range chunkSpecChan {
			// OR all bitmaps over this chunk's key range lazily, then repair
			// the invalid cardinalities left behind by the lazy path.
			ra := lazyOrOnRange(&bitmaps[0].highlowcontainer, &bitmaps[1].highlowcontainer, spec.start, spec.end)
			for _, b := range bitmaps[2:] {
				ra = lazyIOrOnRange(ra, &b.highlowcontainer, spec.start, spec.end)
			}

			for i, c := range ra.containers {
				ra.containers[i] = repairAfterLazy(c)
			}

			chunkChan <- parChunk{ra, spec.idx}
		}
	}

	for i := 0; i < parallelism; i++ {
		go orFunc()
	}

	// Feed chunk specs asynchronously so workers can start immediately.
	go func() {
		for i := 0; i < chunkCount; i++ {
			spec := parChunkSpec{
				start: uint16(int(lKey) + i*chunkSize),
				end:   uint16(minOfInt(int(lKey)+(i+1)*chunkSize-1, int(hKey))),
				idx:   int(i),
			}
			chunkSpecChan <- spec
		}
	}()

	// Collect results; chunk.idx keeps them in key order regardless of
	// completion order.
	chunksRemaining := chunkCount
	for chunk := range chunkChan {
		chunks[chunk.idx] = chunk.ra
		chunksRemaining--
		if chunksRemaining == 0 {
			break
		}
	}
	close(chunkChan)
	close(chunkSpecChan)

	// Stitch the per-chunk arrays together into one pre-sized result.
	containerCount := 0
	for _, chunk := range chunks {
		containerCount += chunk.size()
	}

	result := Bitmap{
		roaringArray{
			containers:      make([]container, containerCount),
			keys:            make([]uint16, containerCount),
			needCopyOnWrite: make([]bool, containerCount),
		},
	}

	resultOffset := 0
	for _, chunk := range chunks {
		copy(result.highlowcontainer.containers[resultOffset:], chunk.containers)
		copy(result.highlowcontainer.keys[resultOffset:], chunk.keys)
		copy(result.highlowcontainer.needCopyOnWrite[resultOffset:], chunk.needCopyOnWrite)
		resultOffset += chunk.size()
	}

	return &result
}
// parChunkSpec describes one unit of ParOr work: an inclusive [start, end]
// container-key range plus the chunk's slot index in the result.
type parChunkSpec struct {
	start uint16
	end   uint16
	idx   int
}
// parChunk is a worker's result for one key range: the merged roaringArray
// tagged with its chunk index so results can be reassembled in order.
type parChunk struct {
	ra  *roaringArray
	idx int
}

// size reports the number of containers held by the chunk.
func (c parChunk) size() int {
	return c.ra.size()
}
  394. func parNaiveStartAt(ra *roaringArray, start uint16, last uint16) int {
  395. for idx, key := range ra.keys {
  396. if key >= start && key <= last {
  397. return idx
  398. } else if key > last {
  399. break
  400. }
  401. }
  402. return ra.size()
  403. }
// lazyOrOnRange computes the lazy union of ra1 and ra2 restricted to
// container keys in [start, last], returning a freshly allocated
// roaringArray. Resulting bitmap containers may carry an invalid cardinality
// (see repairAfterLazy).
func lazyOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
	answer := newRoaringArray()
	length1 := ra1.size()
	length2 := ra2.size()

	// Skip ahead to the first key of each array that lies inside the range.
	idx1 := parNaiveStartAt(ra1, start, last)
	idx2 := parNaiveStartAt(ra2, start, last)

	var key1 uint16
	var key2 uint16
	if idx1 < length1 && idx2 < length2 {
		key1 = ra1.getKeyAtIndex(idx1)
		key2 = ra2.getKeyAtIndex(idx2)

		// Classic two-pointer merge over the two key-sorted arrays.
		for key1 <= last && key2 <= last {
			if key1 < key2 {
				answer.appendCopy(*ra1, idx1)
				idx1++
				if idx1 == length1 {
					break
				}
				key1 = ra1.getKeyAtIndex(idx1)
			} else if key1 > key2 {
				answer.appendCopy(*ra2, idx2)
				idx2++
				if idx2 == length2 {
					break
				}
				key2 = ra2.getKeyAtIndex(idx2)
			} else {
				// Same key in both arrays: lazy-OR the two containers.
				c1 := ra1.getFastContainerAtIndex(idx1, false)

				answer.appendContainer(key1, c1.lazyOR(ra2.getContainerAtIndex(idx2)), false)
				idx1++
				idx2++
				if idx1 == length1 || idx2 == length2 {
					break
				}

				key1 = ra1.getKeyAtIndex(idx1)
				key2 = ra2.getKeyAtIndex(idx2)
			}
		}
	}

	// Drain whatever remains of ra2 within the range...
	if idx2 < length2 {
		key2 = ra2.getKeyAtIndex(idx2)
		for key2 <= last {
			answer.appendCopy(*ra2, idx2)
			idx2++
			if idx2 == length2 {
				break
			}
			key2 = ra2.getKeyAtIndex(idx2)
		}
	}

	// ...and whatever remains of ra1 (at most one of the two loops runs).
	if idx1 < length1 {
		key1 = ra1.getKeyAtIndex(idx1)
		for key1 <= last {
			answer.appendCopy(*ra1, idx1)
			idx1++
			if idx1 == length1 {
				break
			}
			key1 = ra1.getKeyAtIndex(idx1)
		}
	}
	return answer
}
// lazyIOrOnRange merges ra2's containers with keys in [start, last] into ra1
// in place and returns ra1. Containers spliced in from ra2 are shared, so
// they are flagged copy-on-write; lazily OR-ed bitmap containers may carry
// an invalid cardinality (see repairAfterLazy).
func lazyIOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
	length1 := ra1.size()
	length2 := ra2.size()
	// ra1 is assumed to be already restricted to [start, last] (it comes from
	// lazyOrOnRange in ParOr — idx1 starts at 0), so only ra2 is advanced to
	// the start of the range.
	idx1 := 0
	idx2 := parNaiveStartAt(ra2, start, last)

	var key1 uint16
	var key2 uint16
	if idx1 < length1 && idx2 < length2 {
		key1 = ra1.getKeyAtIndex(idx1)
		key2 = ra2.getKeyAtIndex(idx2)

		for key1 <= last && key2 <= last {
			if key1 < key2 {
				// Key only in ra1: already in place, just advance.
				idx1++
				if idx1 >= length1 {
					break
				}
				key1 = ra1.getKeyAtIndex(idx1)
			} else if key1 > key2 {
				// Key only in ra2: splice its container into ra1 as a shared
				// reference (hence copy-on-write). ra1 grows by one element.
				ra1.insertNewKeyValueAt(idx1, key2, ra2.getContainerAtIndex(idx2))
				ra1.needCopyOnWrite[idx1] = true
				idx2++
				idx1++
				length1++
				if idx2 >= length2 {
					break
				}
				key2 = ra2.getKeyAtIndex(idx2)
			} else {
				// Key in both: lazy in-place OR into ra1's container; the
				// result is freshly owned, so clear the copy-on-write flag.
				c1 := ra1.getFastContainerAtIndex(idx1, true)

				ra1.containers[idx1] = c1.lazyIOR(ra2.getContainerAtIndex(idx2))
				ra1.needCopyOnWrite[idx1] = false
				idx1++
				idx2++
				if idx1 >= length1 || idx2 >= length2 {
					break
				}

				key1 = ra1.getKeyAtIndex(idx1)
				key2 = ra2.getKeyAtIndex(idx2)
			}
		}
	}

	// Append any ra2 containers still inside the range past ra1's end.
	if idx2 < length2 {
		key2 = ra2.getKeyAtIndex(idx2)
		for key2 <= last {
			ra1.appendCopy(*ra2, idx2)
			idx2++
			if idx2 >= length2 {
				break
			}
			key2 = ra2.getKeyAtIndex(idx2)
		}
	}
	return ra1
}