instancegroup_select.go 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. package core
  15. import (
  16. "fmt"
  17. "sort"
  18. "yunion.io/x/log"
  19. "yunion.io/x/pkg/errors"
  20. "yunion.io/x/pkg/tristate"
  21. "yunion.io/x/pkg/util/sets"
  22. schedapi "yunion.io/x/onecloud/pkg/apis/scheduler"
  23. "yunion.io/x/onecloud/pkg/compute/models"
  24. "yunion.io/x/onecloud/pkg/scheduler/api"
  25. )
  26. func transToInstanceGroupSchedResult(result *SchedResultItemList, schedInfo *api.SchedInfo) *schedapi.ScheduleOutput {
  27. for _, item := range result.Data {
  28. item.Count = 0
  29. }
  30. guestInfos, backGuestInfos, groups := generateGuestInfo(schedInfo)
  31. hosts := buildHosts(result, groups)
  32. if len(backGuestInfos) > 0 {
  33. return getBackupSchedResult(hosts, guestInfos, backGuestInfos, schedInfo.SessionId)
  34. }
  35. return getSchedResult(hosts, guestInfos, schedInfo.SessionId)
  36. }
// sGuestInfo bundles everything needed to place a single guest: the original
// scheduling request, the instance groups the guest belongs to, and an
// optional host the guest must land on.
type sGuestInfo struct {
	schedInfo *api.SchedInfo
	// instance-group id -> group detail (granularity, dispersion policy)
	instanceGroupsDetail map[string]*models.SGroup
	// when non-empty, only this host id may be selected
	preferHost string
}
// sSchedResultItem decorates a SchedResultItem with the bookkeeping used by
// the instance-group-aware host selection in this file.
type sSchedResultItem struct {
	*SchedResultItem
	// remaining free slots per instance-group id on this host
	instanceGroupCapacity map[string]int64
	// numbers of HA master/backup guests already placed on this host
	masterCount int64
	backupCount int64
}
  48. func (item *sSchedResultItem) minInstanceGroupCapacity(groupSet map[string]*models.SGroup) int64 {
  49. var mincapa int64 = -1
  50. for id, capa := range item.instanceGroupCapacity {
  51. if _, ok := groupSet[id]; !ok {
  52. continue
  53. }
  54. if mincapa == -1 || capa < mincapa {
  55. mincapa = capa
  56. }
  57. }
  58. return mincapa
  59. }
  60. func buildHosts(result *SchedResultItemList, groups map[string]*models.SGroup) []*sSchedResultItem {
  61. hosts := make([]*sSchedResultItem, result.Data.Len())
  62. for i := 0; i < len(result.Data); i++ {
  63. getter := result.Data[i].Candidater.Getter()
  64. igCapacity := make(map[string]int64)
  65. for id, group := range groups {
  66. c, err := getter.GetFreeGroupCount(id)
  67. if err != nil {
  68. if errors.Cause(err) == ErrInstanceGroupNotFound {
  69. igCapacity[id] = int64(group.Granularity)
  70. } else {
  71. igCapacity[id] = 0
  72. log.Errorf("GetFreeGroupCount: %s", err.Error())
  73. }
  74. } else {
  75. igCapacity[id] = int64(c)
  76. }
  77. }
  78. hosts[i] = &sSchedResultItem{
  79. SchedResultItem: result.Data[i],
  80. instanceGroupCapacity: igCapacity,
  81. }
  82. }
  83. return hosts
  84. }
  85. // sortHost sorts the host for guest that is the backup one of the high-availability guest
  86. // if isBackup is true and the master one if isBackup is false.
  87. func sortHosts(hosts []*sSchedResultItem, guestInfo *sGuestInfo, isBackup *bool) {
  88. sortIndexi, sortIndexj := make([]int64, 5), make([]int64, 5)
  89. sort.Slice(hosts, func(i, j int) bool {
  90. switch {
  91. case isBackup == nil:
  92. sortIndexi[0], sortIndexj[0] = hosts[i].Count, hosts[j].Count
  93. case *isBackup:
  94. sortIndexi[0], sortIndexj[0] = hosts[i].backupCount, hosts[j].backupCount
  95. default:
  96. sortIndexi[0], sortIndexj[0] = hosts[i].masterCount, hosts[j].masterCount
  97. }
  98. sortIndexi[1], sortIndexj[1] = hosts[i].Count, hosts[j].Count
  99. sortIndexi[2], sortIndexj[2] = -(hosts[i].minInstanceGroupCapacity(guestInfo.instanceGroupsDetail)), -(hosts[j].minInstanceGroupCapacity(guestInfo.instanceGroupsDetail))
  100. iScore, jScore := scoreNormalization(hosts[i].Score, hosts[j].Score)
  101. sortIndexi[3], sortIndexj[3] = -iScore, -jScore
  102. sortIndexi[4], sortIndexj[4] = -(hosts[i].Capacity), -(hosts[j].Capacity)
  103. for i := 0; i < 5; i++ {
  104. if sortIndexi[i] == sortIndexj[i] {
  105. continue
  106. }
  107. return sortIndexi[i] < sortIndexj[i]
  108. }
  109. return true
  110. })
  111. }
  112. // scoreNormalization compare the value of s1 and s2.
  113. // If s1 is less than s2, return 1, 0 which means s2 is better than s1.
  114. func scoreNormalization(s1, s2 Score) (int64, int64) {
  115. preferScore1, preferScore2 := s1.PreferScore()-s1.AvoidScore(), s2.PreferScore()-s2.AvoidScore()
  116. normalScore1, normalScore2 := s1.NormalScore(), s2.NormalScore()
  117. if preferScore1 < preferScore2 {
  118. return 0, 1
  119. }
  120. if preferScore1 > preferScore2 {
  121. return 1, 0
  122. }
  123. if normalScore1 < normalScore2 {
  124. return 0, 1
  125. }
  126. if normalScore1 > normalScore2 {
  127. return 1, 0
  128. }
  129. return 0, 0
  130. }
  131. // buildWireHosts classify hosts according to their wire
  132. func buildWireHosts(hosts []*sSchedResultItem) map[string][]*sSchedResultItem {
  133. wireHostMap := make(map[string][]*sSchedResultItem)
  134. for _, host := range hosts {
  135. networks := host.Candidater.Getter().Networks()
  136. for j := 0; j < len(networks); j++ {
  137. if hosts, ok := wireHostMap[networks[j].WireId]; ok {
  138. if hostsIndex(host.ID, hosts) < 0 {
  139. wireHostMap[networks[j].WireId] = append(hosts, host)
  140. }
  141. } else {
  142. wireHostMap[networks[j].WireId] = []*sSchedResultItem{host}
  143. }
  144. }
  145. }
  146. return wireHostMap
  147. }
  148. // generateGuestInfo return guestInfos, backupGuestInfos and all instanceGroups
  149. func generateGuestInfo(schedInfo *api.SchedInfo) ([]sGuestInfo, []sGuestInfo, map[string]*models.SGroup) {
  150. infos := make([]sGuestInfo, 0, schedInfo.Count)
  151. infobs := make([]sGuestInfo, 0, schedInfo.Count)
  152. groups := make(map[string]*models.SGroup)
  153. name := schedInfo.Name
  154. if len(name) == 0 {
  155. name = "default"
  156. }
  157. for id, group := range schedInfo.InstanceGroupsDetail {
  158. groups[id] = group
  159. }
  160. for i := 0; i < schedInfo.Count; i++ {
  161. info := sGuestInfo{
  162. schedInfo: schedInfo,
  163. instanceGroupsDetail: make(map[string]*models.SGroup),
  164. preferHost: schedInfo.PreferHost,
  165. }
  166. for id, group := range schedInfo.InstanceGroupsDetail {
  167. info.instanceGroupsDetail[id] = group
  168. }
  169. infos = append(infos, info)
  170. if !schedInfo.Backup {
  171. continue
  172. }
  173. infob := sGuestInfo{
  174. schedInfo: schedInfo,
  175. preferHost: schedInfo.PreferBackupHost,
  176. instanceGroupsDetail: make(map[string]*models.SGroup),
  177. }
  178. infobs = append(infobs, infob)
  179. // Virtual an instanceGroup
  180. group := models.SGroup{
  181. Granularity: 1,
  182. ForceDispersion: tristate.True,
  183. }
  184. groupid := fmt.Sprintf("virtual-%s-%d", name, i)
  185. group.Id = groupid
  186. infos[i].instanceGroupsDetail[groupid] = &group
  187. infobs[i].instanceGroupsDetail[groupid] = &group
  188. groups[groupid] = &group
  189. }
  190. return infos, infobs, groups
  191. }
  192. func hostsIndex(hostId string, hosts []*sSchedResultItem) int {
  193. for i := 0; i < len(hosts); i++ {
  194. if hosts[i].ID == hostId {
  195. return i
  196. }
  197. }
  198. return -1
  199. }
  200. // getBackupSchedResult return the ScheduleOutput for guest without backup
  201. func getSchedResult(hosts []*sSchedResultItem, guestInfos []sGuestInfo, sid string) *schedapi.ScheduleOutput {
  202. apiResults := make([]*schedapi.CandidateResource, 0)
  203. storageUsed :=
  204. NewStorageUsed()
  205. var i int = 0
  206. for ; i < len(guestInfos); i++ {
  207. host := selectHost(hosts, guestInfos[i], nil, true)
  208. if host == nil {
  209. host = selectHost(hosts, guestInfos[i], nil, false)
  210. if host == nil {
  211. er := &schedapi.CandidateResource{Error: fmt.Sprintf("no suitable Host for No.%d Guest", i+1)}
  212. apiResults = append(apiResults, er)
  213. break
  214. }
  215. }
  216. markHostUsed(host, guestInfos[i], nil)
  217. tr := host.ToCandidateResource(storageUsed)
  218. tr.SessionId = sid
  219. apiResults = append(apiResults, tr)
  220. }
  221. for ; i < len(guestInfos); i++ {
  222. er := &schedapi.CandidateResource{Error: fmt.Sprintf("no suitable Host for No.%d Guest", i+1)}
  223. apiResults = append(apiResults, er)
  224. }
  225. ret := new(schedapi.ScheduleOutput)
  226. ret.Candidates = apiResults
  227. return ret
  228. }
// getBackupSchedResult returns the ScheduleOutput for guests with a backup.
// For each guest it walks the wires and tries to place a master and a backup
// on two different hosts of the same wire; a wire that cannot fit a pair is
// remembered in nowireIds and skipped for all later guests.
func getBackupSchedResult(hosts []*sSchedResultItem, guestInfos, backGuestInfos []sGuestInfo, sid string) *schedapi.ScheduleOutput {
	wireHostMap := buildWireHosts(hosts)
	apiResults := make([]*schedapi.CandidateResource, 0, len(guestInfos))
	// wires already proven unable to host a master/backup pair
	nowireIds := sets.NewString()
	storageUsed := NewStorageUsed()
	isBackup := true
	isMaster := false
	for i := 0; i < len(guestInfos); i++ {
		// NOTE(review): wire order is Go map iteration order, i.e. random;
		// presumably any wire is acceptable — confirm this is intended.
		for wireid, hosts := range wireHostMap {
			if nowireIds.Has(wireid) {
				continue
			}
			// pick the master host: forced instance groups first, then relaxed
			masterItem := selectHost(hosts, guestInfos[i], &isMaster, true)
			if masterItem == nil {
				masterItem = selectHost(hosts, guestInfos[i], &isMaster, false)
				if masterItem == nil {
					nowireIds.Insert(wireid)
					continue
				}
			}
			// mark master used for now
			markHostUsed(masterItem, guestInfos[i], &isMaster)
			backupItem := selectHost(hosts, backGuestInfos[i], &isBackup, false)
			if backupItem == nil {
				// no host left for the backup on this wire: roll back the
				// master reservation and try the next wire
				nowireIds.Insert(wireid)
				unMarkHostUsed(masterItem, guestInfos[i], &isMaster)
				continue
			}
			markHostUsed(backupItem, backGuestInfos[i], &isBackup)
			canRe := masterItem.ToCandidateResource(storageUsed)
			canRe.BackupCandidate = backupItem.ToCandidateResource(storageUsed)
			canRe.SessionId = sid
			canRe.BackupCandidate.SessionId = sid
			apiResults = append(apiResults, canRe)
			break
		}
		// a candidate was appended for guest i iff the pair was placed
		if len(apiResults) == i+1 {
			continue
		}
		er := &schedapi.CandidateResource{Error: fmt.Sprintf("no suitable Host for No.%d Highly available Guest", i+1)}
		apiResults = append(apiResults, er)
	}
	ret := new(schedapi.ScheduleOutput)
	ret.Candidates = apiResults
	return ret
}
  276. func markHostUsed(host *sSchedResultItem, guestInfo sGuestInfo, isBackup *bool) {
  277. for gid := range guestInfo.instanceGroupsDetail {
  278. host.instanceGroupCapacity[gid] = host.instanceGroupCapacity[gid] - 1
  279. }
  280. host.Capacity--
  281. host.Count++
  282. if isBackup == nil {
  283. return
  284. }
  285. if *isBackup {
  286. host.backupCount++
  287. } else {
  288. host.masterCount++
  289. }
  290. }
  291. // unMarkHostUsed is the reverse operation of markHostUsed
  292. func unMarkHostUsed(host *sSchedResultItem, guestInfo sGuestInfo, isBackup *bool) {
  293. for gid := range guestInfo.instanceGroupsDetail {
  294. host.instanceGroupCapacity[gid] = host.instanceGroupCapacity[gid] + 1
  295. }
  296. host.Capacity++
  297. host.Count--
  298. if isBackup == nil {
  299. return
  300. }
  301. if *isBackup {
  302. host.backupCount--
  303. } else {
  304. host.masterCount--
  305. }
  306. }
  307. // selectHost select host from hosts for guest described by guestInfo.
  308. // If forced is true, all instanceGroups will be forced.
  309. // Otherwise, the instanceGroups with ForceDispersion 'false' will be unforced.
  310. func selectHost(hosts []*sSchedResultItem, guestInfo sGuestInfo, isBackup *bool, forced bool) *sSchedResultItem {
  311. sortHosts(hosts, &guestInfo, isBackup)
  312. var idx = -1
  313. if len(guestInfo.preferHost) > 0 {
  314. if idx = hostsIndex(guestInfo.preferHost, hosts); idx < 0 {
  315. return nil
  316. }
  317. }
  318. var choosed bool
  319. Loop:
  320. for i, host := range hosts {
  321. if idx >= 0 && idx != i {
  322. continue
  323. }
  324. if host.Capacity <= 0 {
  325. continue
  326. }
  327. // check forced instanceGroup
  328. for id, group := range guestInfo.instanceGroupsDetail {
  329. capacity := host.instanceGroupCapacity[id]
  330. checkCapacity := forced || group.ForceDispersion.IsTrue()
  331. if checkCapacity && capacity <= 0 {
  332. continue Loop
  333. }
  334. }
  335. idx = i
  336. choosed = true
  337. break
  338. }
  339. if choosed {
  340. return hosts[idx]
  341. }
  342. return nil
  343. }