// result.go
  1. // Copyright 2019 Yunion
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package core
  15. import (
  16. "encoding/json"
  17. "sort"
  18. "yunion.io/x/onecloud/pkg/apis/compute"
  19. schedapi "yunion.io/x/onecloud/pkg/apis/scheduler"
  20. "yunion.io/x/onecloud/pkg/scheduler/api"
  21. )
// ScheduleResult bundles the possible outputs of one scheduling run.
// Exactly which field is populated depends on the scheduling mode
// (sync, forecast, or test) — presumably only one at a time; confirm
// against callers.
type ScheduleResult struct {
	// Result is the sync schedule result.
	Result *schedapi.ScheduleOutput
	// ForecastResult is the forecast schedule result.
	ForecastResult *api.SchedForecastResult
	// TestResult is the test schedule result.
	TestResult interface{}
}
// SchedResultItem is the per-candidate-host outcome of scheduling:
// identity, remaining capacity, scoring detail, and the resources
// allocated on that host.
type SchedResultItem struct {
	ID       string                 `json:"id"`
	Name     string                 `json:"name"`
	Count    int64                  `json:"count"`
	Data     map[string]interface{} `json:"data"`
	Capacity int64                  `json:"capacity"`
	Score    Score                  `json:"score"`
	// CapacityDetails breaks Capacity down by key — presumably per
	// resource dimension; verify against the producer.
	CapacityDetails map[string]int64 `json:"capacity_details"`
	ScoreDetails    string           `json:"score_details"`
	// Candidater gives access to the candidate host for CPU/NUMA
	// allocation; excluded from JSON output.
	Candidater Candidater `json:"-"`
	// AllocatedResource is embedded: its fields (e.g. Disks, Nets)
	// are promoted onto this item.
	*AllocatedResource
	// SchedData is the original scheduling request this item answers.
	SchedData *api.SchedInfo
}
// SchedResultItemList pairs a scheduling Unit with the result items
// produced for it.
type SchedResultItemList struct {
	Unit *Unit
	Data SchedResultItems
}
  47. func (its SchedResultItemList) String() string {
  48. bytes, _ := json.Marshal(its.Data)
  49. return string(bytes)
  50. }
  51. type SchedResultItems []*SchedResultItem
  52. func (its SchedResultItems) Len() int {
  53. return len(its)
  54. }
  55. func (its SchedResultItems) Swap(i, j int) {
  56. its[i], its[j] = its[j], its[i]
  57. }
  58. func (its SchedResultItems) Less(i, j int) bool {
  59. it1, it2 := its[i], its[j]
  60. return it1.Capacity < it2.Capacity
  61. }
// ToCandidateResource converts this result item into the API-level
// candidate representation: it allocates CPU/NUMA pinning on the
// candidate host and orders each disk's storage candidates by free
// capacity, tracked cumulatively through storageUsed.
func (item *SchedResultItem) ToCandidateResource(storageUsed *StorageUsed) *schedapi.CandidateResource {
	return &schedapi.CandidateResource{
		HostId:     item.ID,
		CpuNumaPin: item.selectCpuNumaPin(),
		Name:       item.Name,
		Disks:      item.getDisks(storageUsed),
		// Nets is promoted from the embedded *AllocatedResource —
		// presumably the networks allocated for this guest; confirm.
		Nets: item.Nets,
	}
}
// selectCpuNumaPin computes the CPU/NUMA pinning for the scheduled
// guest on this candidate host. Regular vCPUs plus any extra CPUs are
// allocated together; extra CPUs are then balanced greedily across the
// returned NUMA pins.
func (item *SchedResultItem) selectCpuNumaPin() []schedapi.SCpuNumaPin {
	// Total CPUs to allocate includes the extra CPU request, if any.
	vcpuCount := item.SchedData.Ncpu
	if item.SchedData.ExtraCpuCount > 0 {
		vcpuCount += item.SchedData.ExtraCpuCount
	}
	var res []schedapi.SCpuNumaPin
	if item.SchedData.LiveMigrate && len(item.SchedData.CpuNumaPin) > 0 {
		// Live migration with existing pinning: keep the same number of
		// NUMA nodes as on the source host.
		// Memory is multiplied by 1024 — presumably MB -> KB; confirm
		// against the Candidater implementation.
		res = item.Candidater.AllocCpuNumaPinWithNodeCount(vcpuCount, item.SchedData.Memory*1024, len(item.SchedData.CpuNumaPin))
	} else {
		res = item.Candidater.AllocCpuNumaPin(vcpuCount, item.SchedData.Memory*1024, item.SchedData.PreferNumaNodes)
	}
	if item.SchedData.ExtraCpuCount > 0 {
		// Distribute extra CPUs one at a time. Each round picks the pin
		// entry with the most non-extra CPUs (len(CpuPin) minus the
		// extras already assigned to it) and charges one extra CPU to
		// it, spreading extras toward the larger allocations.
		// NOTE(review): if the allocator returned an empty res while
		// ExtraCpuCount > 0, res[cpuMaxIdx] would panic — presumably
		// the allocator never returns empty on success; confirm.
		extraCpuCnt := item.SchedData.ExtraCpuCount
		for extraCpuCnt > 0 {
			cpuMaxIdx := 0
			cpuMax := -1
			for i := range res {
				if len(res[i].CpuPin)-res[i].ExtraCpuCount > cpuMax {
					cpuMax = len(res[i].CpuPin) - res[i].ExtraCpuCount
					cpuMaxIdx = i
				}
			}
			res[cpuMaxIdx].ExtraCpuCount += 1
			extraCpuCnt -= 1
		}
	}
	return res
}
// getDisks builds the per-disk candidate entries for this item, pairing
// each allocated disk with its storage ids sorted by remaining free
// capacity (tracked through used).
// Assumes item.Disks (from the embedded *AllocatedResource) and
// item.SchedData.Disks are index-aligned — TODO confirm; a shorter
// inputs slice would panic on inputs[idx].
func (item *SchedResultItem) getDisks(used *StorageUsed) []*schedapi.CandidateDisk {
	inputs := item.SchedData.Disks
	ret := make([]*schedapi.CandidateDisk, 0)
	for idx, disk := range item.Disks {
		ret = append(ret, &schedapi.CandidateDisk{
			Index:      idx,
			StorageIds: item.getSortStorageIds(used, inputs[idx], disk.Storages),
		})
	}
	return ret
}
  110. func (item *SchedResultItem) getSortStorageIds(
  111. used *StorageUsed,
  112. disk *compute.DiskConfig,
  113. storages []*schedapi.CandidateStorage) []string {
  114. reqSize := disk.SizeMb
  115. ss := make([]sortStorage, 0)
  116. for _, s := range storages {
  117. ss = append(ss, sortStorage{
  118. Id: s.Id,
  119. FeeSize: s.FreeCapacity - used.Get(s.Id),
  120. })
  121. }
  122. toSort := sortStorages(ss)
  123. sort.Sort(toSort)
  124. sortedStorages := toSort.getIds()
  125. ret := make([]string, 0)
  126. for idx, id := range sortedStorages {
  127. if idx == 0 {
  128. used.Add(id, int64(reqSize))
  129. }
  130. ret = append(ret, id)
  131. }
  132. return ret
  133. }