hosts.go 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807
  1. // Copyright 2019 Yunion
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package candidate
  15. import (
  16. "context"
  17. "encoding/json"
  18. "sort"
  19. gosync "sync"
  20. "time"
  21. "yunion.io/x/jsonutils"
  22. "yunion.io/x/log"
  23. "yunion.io/x/pkg/errors"
  24. "yunion.io/x/pkg/util/sets"
  25. "yunion.io/x/pkg/utils"
  26. "yunion.io/x/sqlchemy"
  27. computeapi "yunion.io/x/onecloud/pkg/apis/compute"
  28. hostapi "yunion.io/x/onecloud/pkg/apis/host"
  29. "yunion.io/x/onecloud/pkg/apis/scheduler"
  30. computedb "yunion.io/x/onecloud/pkg/cloudcommon/db"
  31. "yunion.io/x/onecloud/pkg/compute/baremetal"
  32. computemodels "yunion.io/x/onecloud/pkg/compute/models"
  33. "yunion.io/x/onecloud/pkg/scheduler/core"
  34. schedmodels "yunion.io/x/onecloud/pkg/scheduler/models"
  35. o "yunion.io/x/onecloud/pkg/scheduler/options"
  36. "yunion.io/x/onecloud/pkg/util/cgrouputils/cpuset"
  37. )
  38. type hostGetter struct {
  39. *baseHostGetter
  40. h *HostDesc
  41. }
  42. func newHostGetter(h *HostDesc) *hostGetter {
  43. return &hostGetter{
  44. baseHostGetter: newBaseHostGetter(h.BaseHostDesc),
  45. h: h,
  46. }
  47. }
  48. func (h *hostGetter) CreatingGuestCount() int {
  49. return int(h.h.CreatingGuestCount)
  50. }
  51. func (h *hostGetter) RunningCPUCount() int64 {
  52. return h.h.RunningCPUCount
  53. }
  54. func (h *hostGetter) TotalCPUCount(useRsvd bool) int64 {
  55. return h.h.GetTotalCPUCount(useRsvd)
  56. }
  57. func (h *hostGetter) FreeCPUCount(useRsvd bool) int64 {
  58. return h.h.GetFreeCPUCount(useRsvd)
  59. }
  60. func (h *hostGetter) FreeMemorySize(useRsvd bool) int64 {
  61. return h.h.GetFreeMemSize(useRsvd)
  62. }
  63. func (h *hostGetter) NumaAllocateEnabled() bool {
  64. return h.h.HostTopo.NumaEnabled || h.h.HostType == computeapi.HOST_TYPE_CONTAINER
  65. }
  66. func (h *hostGetter) GetFreeCpuNuma() []*scheduler.SFreeNumaCpuMem {
  67. return h.h.GetFreeCpuNuma()
  68. }
  69. func (h *hostGetter) RunningMemorySize() int64 {
  70. return h.h.RunningMemSize
  71. }
  72. func (h *hostGetter) TotalMemorySize(useRsvd bool) int64 {
  73. return h.h.GetTotalMemSize(useRsvd)
  74. }
  75. func (h *hostGetter) IsEmpty() bool {
  76. return h.h.GuestCount == 0
  77. }
  78. func (h *hostGetter) StorageInfo() []*baremetal.BaremetalStorage {
  79. return nil
  80. }
  81. func (h *hostGetter) GetFreeStorageSizeOfType(storageType string, mediumType string, useRsvd bool, reqMaxSize int64) (int64, int64, error) {
  82. return h.h.GetFreeStorageSizeOfType(storageType, mediumType, useRsvd, reqMaxSize)
  83. }
  84. func (h *hostGetter) GetFreePort(netId string) int {
  85. return h.h.GetFreePort(netId)
  86. }
  87. func (h *hostGetter) OvnCapable() bool {
  88. return len(h.h.OvnVersion) > 0
  89. }
// HostDesc is the scheduler-side description of one host candidate:
// BaseHostDesc plus the cpu/memory/storage/guest accounting consumed by
// the scheduling predicates (see hostGetter).
type HostDesc struct {
	*BaseHostDesc

	// cpu
	CPUCmtbound         float32  `json:"cpu_cmtbound"`   // cpu overcommit ratio
	CPUBoundCount       int64    `json:"cpu_bound_count"`
	CPULoad             *float64 `json:"cpu_load"`
	TotalCPUCount       int64    `json:"total_cpu_count"`
	RunningCPUCount     int64    `json:"running_cpu_count"`
	CreatingCPUCount    int64    `json:"creating_cpu_count"`
	RequiredCPUCount    int64    `json:"required_cpu_count"`
	FakeDeletedCPUCount int64    `json:"fake_deleted_cpu_count"`
	FreeCPUCount        int64    `json:"free_cpu_count"`

	// memory
	MemCmtbound           float32    `json:"mem_cmtbound"` // memory overcommit ratio
	TotalMemSize          int64      `json:"total_mem_size"`
	FreeMemSize           int64      `json:"free_mem_size"`
	RunningMemSize        int64      `json:"running_mem_size"`
	CreatingMemSize       int64      `json:"creating_mem_size"`
	RequiredMemSize       int64      `json:"required_mem_size"`
	FakeDeletedMemSize    int64      `json:"fake_deleted_mem_size"`
	EnableCpuNumaAllocate bool       `json:"enable_cpu_numa_allocate"` // set by buildHostTopo
	HostTopo              *SHostTopo `json:"host_topo"`                // NUMA topology built by buildHostTopo

	// storage
	StorageTypes []string `json:"storage_types"`

	// IO
	IOBoundCount int64    `json:"io_bound_count"`
	IOLoad       *float64 `json:"io_load"`

	// server
	GuestCount         int64 `json:"guest_count"`
	CreatingGuestCount int64 `json:"creating_guest_count"`
	RunningGuestCount  int64 `json:"running_guest_count"`
	//Groups *GroupCounts `json:"groups"`
	Metadata      map[string]string `json:"metadata"`
	IsMaintenance bool              `json:"is_maintenance"`
	// Resources reserved for isolated-device guests and the part of them
	// already consumed (see NewGuestReservedResource*Builder).
	GuestReservedResource     *ReservedResource `json:"guest_reserved_resource"`
	GuestReservedResourceUsed *ReservedResource `json:"guest_reserved_used"`
}
// CPUFree tracks how many more vcpus may still be bound to one logical cpu;
// Free is seeded with the cpu commit bound (see CPUDie.initCpuFree) and
// decremented per binding.
type CPUFree struct {
	Cpu  int
	Free int
}
  131. type SorttedCPUFree []*CPUFree
  132. func (pq *SorttedCPUFree) LoadCpu(cpuId int) {
  133. for i := range *pq {
  134. if (*pq)[i].Cpu == cpuId {
  135. (*pq)[i].Free -= 1
  136. }
  137. }
  138. }
  139. func (pq SorttedCPUFree) Len() int { return len(pq) }
  140. func (pq SorttedCPUFree) Less(i, j int) bool {
  141. return pq[i].Free > pq[j].Free
  142. }
  143. func (pq SorttedCPUFree) Swap(i, j int) {
  144. pq[i], pq[j] = pq[j], pq[i]
  145. }
  146. func (pq *SorttedCPUFree) Push(item interface{}) {
  147. *pq = append(*pq, item.(*CPUFree))
  148. }
  149. func (pq *SorttedCPUFree) Pop() interface{} {
  150. old := *pq
  151. n := len(old)
  152. item := old[n-1]
  153. old[n-1] = nil // avoid memory leak
  154. *pq = old[0 : n-1]
  155. return item
  156. }
// CPUDie is one cpu allocation domain of a NUMA node: an L3 cache domain
// when the host reports L3 topology, otherwise the whole node (see
// buildHostTopo).
type CPUDie struct {
	LogicalProcessors cpuset.CPUSet
	// Core thread id maps: logical cpu id -> its hyperthread sibling, filled
	// only for cores that report exactly two logical processors.
	CoreThreadIdMaps map[int]int
	CpuFree          SorttedCPUFree // per-cpu remaining capacity, most-free first
	VcpuCount        int            // guest vcpus charged to this die
}
  164. func (d *CPUDie) initCpuFree(cpuCmtbound int) {
  165. cpuFree := make([]*CPUFree, 0)
  166. for _, cpuId := range d.LogicalProcessors.ToSliceNoSort() {
  167. cpuFree = append(cpuFree, &CPUFree{cpuId, cpuCmtbound})
  168. }
  169. d.CpuFree = cpuFree
  170. sort.Sort(d.CpuFree)
  171. }
  172. type SorttedCPUDie []*CPUDie
  173. func (pq SorttedCPUDie) Len() int { return len(pq) }
  174. func (pq SorttedCPUDie) Less(i, j int) bool {
  175. return pq[i].VcpuCount < pq[j].VcpuCount
  176. }
  177. func (pq SorttedCPUDie) Swap(i, j int) {
  178. pq[i], pq[j] = pq[j], pq[i]
  179. }
  180. func (pq *SorttedCPUDie) Push(item interface{}) {
  181. *pq = append(*pq, item.(*CPUDie))
  182. }
  183. func (pq *SorttedCPUDie) Pop() interface{} {
  184. old := *pq
  185. n := len(old)
  186. item := old[n-1]
  187. old[n-1] = nil // avoid memory leak
  188. *pq = old[0 : n-1]
  189. return item
  190. }
  191. func (pq *SorttedCPUDie) LoadCpus(cpus []int, vcpuCount int) {
  192. var cpuDies = map[int][]int{}
  193. for i := 0; i < len(cpus); i++ {
  194. for j := 0; j < len(*pq); j++ {
  195. if (*pq)[j].LogicalProcessors.Contains(cpus[i]) {
  196. if cpuDie, ok := cpuDies[j]; !ok {
  197. cpuDies[j] = []int{cpus[i]}
  198. } else {
  199. cpuDies[j] = append(cpuDie, cpus[i])
  200. }
  201. break
  202. }
  203. }
  204. }
  205. for i := 0; i < len(*pq); i++ {
  206. if cpus, ok := cpuDies[i]; ok {
  207. d := (*pq)[i]
  208. for _, cpu := range cpus {
  209. d.CpuFree.LoadCpu(cpu)
  210. }
  211. d.VcpuCount += vcpuCount
  212. sort.Sort(d.CpuFree)
  213. }
  214. }
  215. sort.Sort(pq)
  216. }
// NumaNode aggregates the cpu dies and memory accounting of one NUMA node.
type NumaNode struct {
	CpuDies           SorttedCPUDie // dies ordered by allocated vcpus (see SorttedCPUDie.Less)
	LogicalProcessors cpuset.CPUSet // union of all die cpu sets
	VcpuCount         int           // guest vcpus currently charged to this node
	CpuCount          int           // schedulable physical cpu count
	NodeId            int
	// Distances is the NUMA distance vector as reported by the host;
	// getDistancesSeqByPreferNodes indexes it by peer node id.
	Distances             []int
	NumaNodeMemSizeKB     int // total allocatable memory (KB)
	NumaNodeFreeMemSizeKB int // remaining allocatable memory (KB)
}
  227. func (n *NumaNode) nodeEnough(vcpuCount, memSizeKB int, cmtBound float32, enableNumaAlloc bool) bool {
  228. if int(float32(n.CpuCount)*cmtBound)-n.VcpuCount < vcpuCount {
  229. return false
  230. }
  231. if enableNumaAlloc {
  232. if n.NumaNodeFreeMemSizeKB < memSizeKB {
  233. return false
  234. }
  235. }
  236. return true
  237. }
// allocCpusetSequenceN tries to pin vcpuCount cpus as a single aligned,
// contiguous run of cpu ids (the base id is a multiple of vcpuCount) within
// one die, and falls back to _allocCpuset when vcpuCount is not a multiple of
// the configured sequence interval, the run would not fit in one die, or no
// fully-free aligned run exists.
func (n *NumaNode) allocCpusetSequenceN(vcpuCount int, usedCpu map[int]int) {
	var seqNumber = o.Options.GuestCpusetAllocSequenceInterval
	// Only whole multiples of the interval that fit inside one die are
	// candidates for sequence allocation.
	if vcpuCount%seqNumber != 0 || n.CpuCount/len(n.CpuDies) < vcpuCount {
		n._allocCpuset(vcpuCount, usedCpu)
		return
	}
	for i := range n.CpuDies {
		// detectedSet remembers cpu ids already covered by an examined
		// window so each aligned window is tested at most once per die.
		detectedSet := cpuset.NewCPUSet()
		for j := range n.CpuDies[i].CpuFree {
			if detectedSet.Contains(n.CpuDies[i].CpuFree[j].Cpu) {
				continue
			}
			// Align the window start down to a multiple of vcpuCount.
			cpuIdBase := n.CpuDies[i].CpuFree[j].Cpu - n.CpuDies[i].CpuFree[j].Cpu%vcpuCount
			lo, hi := cpuIdBase, cpuIdBase+vcpuCount-1
			cpuIds := make([]int, hi-lo+1)
			for m := range cpuIds {
				cpuIds[m] = m + lo
			}
			var matched = true
			cpuIdSet := cpuset.NewCPUSet(cpuIds...)
			detectedSet = detectedSet.Union(cpuIdSet)
			// The window qualifies only if every cpu of the window present in
			// this die's free list still has free capacity.
			for k := range n.CpuDies[i].CpuFree {
				if !cpuIdSet.Contains(n.CpuDies[i].CpuFree[k].Cpu) {
					continue
				}
				if n.CpuDies[i].CpuFree[k].Free <= 0 {
					matched = false
					break
				}
			}
			if !matched {
				continue
			}
			// NOTE(review): window cpu ids that are absent from this die's
			// CpuFree list (e.g. reserved cpus) are still marked used here —
			// confirm a window can never straddle such ids.
			for m := range cpuIds {
				usedCpu[cpuIds[m]] = 1
			}
			return
		}
	}
	n._allocCpuset(vcpuCount, usedCpu)
}
  279. func (n *NumaNode) allocCpuset(vcpuCount int, usedCpu map[int]int) {
  280. if o.Options.GuestCpusetAllocSequence {
  281. n.allocCpusetSequenceN(vcpuCount, usedCpu)
  282. return
  283. }
  284. n._allocCpuset(vcpuCount, usedCpu)
  285. }
// _allocCpuset greedily binds vcpuCount vcpus to this node's cpus, walking
// dies in their sorted order and, after each cpu, preferring its hyperthread
// sibling (CoreThreadIdMaps) so a guest's vcpus land on paired threads.
// Bindings accumulate in usedCpu (cpu id -> vcpus bound); a cpu accepts
// bindings up to its current Free count.
// NOTE(review): if the node has no remaining free capacity the tail
// recursion below never terminates — callers appear to guarantee capacity
// via nodeEnough/nodesEnough first; confirm.
func (n *NumaNode) _allocCpuset(vcpuCount int, usedCpu map[int]int) {
	for i := range n.CpuDies {
		for j := range n.CpuDies[i].CpuFree {
			cpuId, nFree := n.CpuDies[i].CpuFree[j].Cpu, n.CpuDies[i].CpuFree[j].Free
			if cnt, ok := usedCpu[cpuId]; ok {
				// Already holds some of this guest's vcpus: add one more only
				// while below the cpu's free slot count.
				if cnt < nFree {
					usedCpu[cpuId] = cnt + 1
					vcpuCount -= 1
					if vcpuCount <= 0 {
						return
					}
				}
			} else {
				if nFree > 0 {
					usedCpu[cpuId] = 1
					vcpuCount -= 1
					if vcpuCount <= 0 {
						return
					}
				}
			}
			// Try to place the next vcpu on this cpu's hyperthread sibling.
			if pairCpuId, ok := n.CpuDies[i].CoreThreadIdMaps[cpuId]; ok {
				for k := range n.CpuDies[i].CpuFree {
					if n.CpuDies[i].CpuFree[k].Cpu == pairCpuId {
						pairNFree := n.CpuDies[i].CpuFree[k].Free
						if cnt, ok := usedCpu[pairCpuId]; ok {
							if cnt < pairNFree {
								usedCpu[pairCpuId] = cnt + 1
								vcpuCount -= 1
								if vcpuCount <= 0 {
									return
								}
							}
						} else {
							if pairNFree > 0 {
								usedCpu[pairCpuId] = 1
								vcpuCount -= 1
								if vcpuCount <= 0 {
									return
								}
							}
						}
					}
				}
			}
		}
		//sort.Sort(n.CpuDies[i].CpuFree)
	}
	// Not all vcpus placed after a full pass: take another pass, allowing
	// further stacking onto the same cpus.
	n._allocCpuset(vcpuCount, usedCpu)
}
  336. func (n *NumaNode) AllocCpuset(vcpuCount int) []int {
  337. if vcpuCount <= 0 {
  338. return nil
  339. }
  340. var usedCpuCount = make(map[int]int)
  341. n.allocCpuset(vcpuCount, usedCpuCount)
  342. var ret = make([]int, 0)
  343. for cpuId, cnt := range usedCpuCount {
  344. for cnt > 0 {
  345. ret = append(ret, cpuId)
  346. cnt -= 1
  347. }
  348. }
  349. return ret
  350. }
  351. func NewNumaNode(nodeId int, nodeDistances []int, hugepageSizeKb int, nodeHugepages []hostapi.HostNodeHugepageNr, memSizeKB int, memCmtBound float32) *NumaNode {
  352. n := new(NumaNode)
  353. n.LogicalProcessors = cpuset.NewCPUSet()
  354. n.NodeId = nodeId
  355. n.Distances = nodeDistances
  356. if len(nodeHugepages) > 0 {
  357. for i := range nodeHugepages {
  358. if nodeHugepages[i].NodeId == nodeId {
  359. n.NumaNodeMemSizeKB = nodeHugepages[i].HugepageNr * hugepageSizeKb
  360. }
  361. }
  362. } else {
  363. n.NumaNodeMemSizeKB = int(float32(memSizeKB) * memCmtBound)
  364. }
  365. n.NumaNodeFreeMemSizeKB = n.NumaNodeMemSizeKB
  366. return n
  367. }
// SHostTopo is the schedulable NUMA topology of one host. Nodes is kept
// sorted via sort.Interface (most free memory first, see Less).
type SHostTopo struct {
	Nodes       []*NumaNode
	NumaEnabled bool    // per-node memory accounting enabled
	CPUCmtbound float32 // cpu overcommit ratio
	HostName    string  // for log messages
}
  374. func HostTopoSubPendingUsage(topo *SHostTopo, cpuUsage map[int]int, numaMemUsage map[int]int) *SHostTopo {
  375. res := new(SHostTopo)
  376. res.NumaEnabled = topo.NumaEnabled
  377. res.CPUCmtbound = topo.CPUCmtbound
  378. res.Nodes = make([]*NumaNode, len(topo.Nodes))
  379. for i := range topo.Nodes {
  380. res.Nodes[i] = new(NumaNode)
  381. res.Nodes[i].LogicalProcessors = topo.Nodes[i].LogicalProcessors.Clone()
  382. res.Nodes[i].VcpuCount = topo.Nodes[i].VcpuCount
  383. res.Nodes[i].CpuCount = topo.Nodes[i].CpuCount
  384. res.Nodes[i].NodeId = topo.Nodes[i].NodeId
  385. res.Nodes[i].NumaNodeMemSizeKB = topo.Nodes[i].NumaNodeMemSizeKB
  386. res.Nodes[i].NumaNodeFreeMemSizeKB = topo.Nodes[i].NumaNodeFreeMemSizeKB
  387. res.Nodes[i].Distances = topo.Nodes[i].Distances
  388. if memUsed, ok := numaMemUsage[topo.Nodes[i].NodeId]; ok {
  389. res.Nodes[i].NumaNodeFreeMemSizeKB -= memUsed * 1024
  390. }
  391. res.Nodes[i].CpuDies = make([]*CPUDie, len(topo.Nodes[i].CpuDies))
  392. for j := range topo.Nodes[i].CpuDies {
  393. res.Nodes[i].CpuDies[j] = &CPUDie{
  394. LogicalProcessors: topo.Nodes[i].CpuDies[j].LogicalProcessors.Clone(),
  395. CpuFree: make(SorttedCPUFree, 0),
  396. VcpuCount: topo.Nodes[i].CpuDies[j].VcpuCount,
  397. }
  398. for k := range topo.Nodes[i].CpuDies[j].CpuFree {
  399. cpuFree := topo.Nodes[i].CpuDies[j].CpuFree[k]
  400. cpuId := cpuFree.Cpu
  401. free := cpuFree.Free
  402. if pending, ok := cpuUsage[cpuId]; ok {
  403. res.Nodes[i].CpuDies[j].CpuFree = append(res.Nodes[i].CpuDies[j].CpuFree, &CPUFree{cpuId, free - pending})
  404. res.Nodes[i].CpuDies[j].VcpuCount += pending
  405. res.Nodes[i].VcpuCount += pending
  406. } else {
  407. res.Nodes[i].CpuDies[j].CpuFree = append(res.Nodes[i].CpuDies[j].CpuFree, &CPUFree{cpuId, free})
  408. }
  409. }
  410. sort.Sort(res.Nodes[i].CpuDies[j].CpuFree)
  411. }
  412. }
  413. sort.Sort(res)
  414. return res
  415. }
  416. func (pq SHostTopo) Len() int { return len(pq.Nodes) }
  417. func (pq SHostTopo) Less(i, j int) bool {
  418. if pq.NumaEnabled {
  419. if pq.Nodes[i].NumaNodeFreeMemSizeKB == pq.Nodes[j].NumaNodeFreeMemSizeKB {
  420. return pq.Nodes[i].VcpuCount < pq.Nodes[j].VcpuCount
  421. }
  422. return pq.Nodes[i].NumaNodeFreeMemSizeKB > pq.Nodes[j].NumaNodeFreeMemSizeKB
  423. } else {
  424. return pq.Nodes[i].NumaNodeFreeMemSizeKB > pq.Nodes[j].NumaNodeFreeMemSizeKB
  425. }
  426. }
  427. func (pq SHostTopo) Swap(i, j int) {
  428. pq.Nodes[i], pq.Nodes[j] = pq.Nodes[j], pq.Nodes[i]
  429. }
  430. func (pq *SHostTopo) Push(item interface{}) {
  431. (*pq).Nodes = append((*pq).Nodes, item.(*NumaNode))
  432. }
// LoadCpuNumaPin applies existing guest cpu/numa pinnings to the topology:
// each pin's cpus are charged to the dies of its node, pinned memory (if
// any) is deducted from the node's free memory, and the node order is then
// re-sorted.
// NOTE(review): if a pin references a NodeId that is not present in h.Nodes,
// node stays nil and the dereference below panics — confirm pins are always
// validated upstream.
func (h *SHostTopo) LoadCpuNumaPin(guestsCpuNumaPin []scheduler.SCpuNumaPin) {
	for _, gCpuNumaPin := range guestsCpuNumaPin {
		var node *NumaNode
		for i := range h.Nodes {
			if h.Nodes[i].NodeId == gCpuNumaPin.NodeId {
				node = h.Nodes[i]
			}
		}
		cpus := gCpuNumaPin.CpuPin
		// Each pinned cpu counts as one vcpu on its die.
		node.CpuDies.LoadCpus(cpus, len(cpus))
		if gCpuNumaPin.MemSizeMB != nil {
			node.NumaNodeFreeMemSizeKB -= *gCpuNumaPin.MemSizeMB * 1024
		}
		node.VcpuCount += len(cpus)
	}
	sort.Sort(h)
}
  450. func (h *SHostTopo) nodesEnough(nodeCount, vcpuCount int, memSizeKB int) bool {
  451. var leastFree = memSizeKB / nodeCount
  452. var leastCpuCount = vcpuCount / nodeCount
  453. var remPcpuCount = vcpuCount % nodeCount
  454. for i := 0; i < nodeCount; i++ {
  455. if h.NumaEnabled {
  456. if h.Nodes[i].NumaNodeFreeMemSizeKB < leastFree {
  457. return false
  458. }
  459. }
  460. requireCpuCount := leastCpuCount
  461. if remPcpuCount > 0 {
  462. requireCpuCount += 1
  463. remPcpuCount -= 1
  464. }
  465. if (h.Nodes[i].VcpuCount + requireCpuCount) > int(float32(h.Nodes[i].CpuCount)*h.CPUCmtbound) {
  466. return false
  467. }
  468. }
  469. return true
  470. }
  471. func (h *SHostTopo) allocCpuNumaNodesByPreferNodes(
  472. vcpuCount, memSizeKB, nodeCount int, sortedNumaDistance []SSortedNumaDistance,
  473. ) []scheduler.SCpuNumaPin {
  474. res := make([]scheduler.SCpuNumaPin, 0)
  475. var nodeAllocSize = memSizeKB / nodeCount
  476. var pcpuCount = vcpuCount / nodeCount
  477. var remPcpuCount = vcpuCount % nodeCount
  478. allocatedNode := 0
  479. for i := range sortedNumaDistance {
  480. if allocatedNode >= nodeCount {
  481. break
  482. }
  483. var npcpuCount = pcpuCount
  484. if remPcpuCount > 0 {
  485. npcpuCount += 1
  486. remPcpuCount -= 1
  487. }
  488. nodeIdx := sortedNumaDistance[i].NodeIndex
  489. if h.Nodes[nodeIdx].nodeEnough(vcpuCount, memSizeKB, h.CPUCmtbound, h.NumaEnabled) {
  490. cpuNumaPin := scheduler.SCpuNumaPin{
  491. CpuPin: h.Nodes[nodeIdx].AllocCpuset(npcpuCount),
  492. NodeId: h.Nodes[nodeIdx].NodeId,
  493. }
  494. allocSize := nodeAllocSize / 1024
  495. cpuNumaPin.MemSizeMB = &allocSize
  496. res = append(res, cpuNumaPin)
  497. allocatedNode += 1
  498. } else {
  499. log.Infof("%s node %v not enough", h.HostName, h.Nodes[i])
  500. }
  501. log.Infof("node %d, free mems %d", h.Nodes[nodeIdx].NodeId, h.Nodes[nodeIdx].NumaNodeFreeMemSizeKB)
  502. }
  503. if allocatedNode < nodeCount {
  504. return nil
  505. }
  506. return res
  507. }
// SSortedNumaDistance pairs a node (by index into SHostTopo.Nodes) with its
// summed NUMA distance to the preferred nodes and a snapshot of its free
// memory, for ranking in getDistancesSeqByPreferNodes.
type SSortedNumaDistance struct {
	NodeIndex   int
	Distance    int
	FreeMemSize int // free memory snapshot in KB
}
// getDistancesSeqByPreferNodes returns one entry per topology node holding
// the node's summed NUMA distance to all preferred nodes plus its free
// memory, ordered so that clearly-nearer nodes come first, while nodes whose
// distances differ by at most the tolerance (7) are ranked by a free-memory
// heuristic relative to the requested memSizeKB.
// NOTE(review): the comparator mixes two criteria and does not look like a
// strict weak ordering for all inputs, so sort.Slice may yield
// implementation-dependent orders in edge cases — confirm intended semantics.
func (h *SHostTopo) getDistancesSeqByPreferNodes(preferNumaNodes []int, memSizeKB int) []SSortedNumaDistance {
	sortedNumaDistance := make([]SSortedNumaDistance, len(h.Nodes))
	for i := range h.Nodes {
		distance := 0
		for j := range preferNumaNodes {
			log.Infof("node distance %v", h.Nodes[i].Distances)
			// NOTE(review): preferred node ids are used to index the distance
			// vector — assumes node ids are dense indexes; confirm.
			distance += h.Nodes[i].Distances[preferNumaNodes[j]]
		}
		sortedNumaDistance[i] = SSortedNumaDistance{
			NodeIndex:   i,
			Distance:    distance,
			FreeMemSize: h.Nodes[i].NumaNodeFreeMemSizeKB,
		}
	}
	sort.Slice(sortedNumaDistance, func(i, j int) bool {
		// 7 is tolerant max distances
		if sortedNumaDistance[i].Distance > (7 + sortedNumaDistance[j].Distance) {
			return false
		} else if (sortedNumaDistance[i].Distance + 7) < sortedNumaDistance[j].Distance {
			return true
		}
		// Distances within tolerance: prefer by free memory relative to the
		// requested size.
		if sortedNumaDistance[i].Distance < sortedNumaDistance[j].Distance {
			return sortedNumaDistance[i].FreeMemSize > memSizeKB && sortedNumaDistance[j].FreeMemSize-sortedNumaDistance[i].FreeMemSize <= 2*memSizeKB
		} else {
			return sortedNumaDistance[j].FreeMemSize > memSizeKB && sortedNumaDistance[i].FreeMemSize-sortedNumaDistance[j].FreeMemSize >= 2*memSizeKB
		}
	})
	return sortedNumaDistance
}
// AllocCpuNumaNodes allocates vcpuCount vcpus and memSizeKB of memory across
// a power-of-two number of NUMA nodes (1, 2, 4, ...). When NUMA accounting
// is enabled and preferNumaNodes is non-empty, nodes nearest the preferred
// set are tried first; otherwise the smallest node count whose leading nodes
// all fit is used. Returns an empty (non-nil) slice when nothing fits.
func (h *SHostTopo) AllocCpuNumaNodes(vcpuCount, memSizeKB int, ignoreMemSingular bool, preferNumaNodes []int) []scheduler.SCpuNumaPin {
	if h.NumaEnabled && len(preferNumaNodes) > 0 {
		log.Infof("preferNumaNodes %v", preferNumaNodes)
		sortedNumaDistance := h.getDistancesSeqByPreferNodes(preferNumaNodes, memSizeKB)
		for nodeCount := 1; nodeCount <= len(h.Nodes); nodeCount *= 2 {
			ret := h.allocCpuNumaNodesByPreferNodes(vcpuCount, memSizeKB, nodeCount, sortedNumaDistance)
			if ret != nil {
				return ret
			}
		}
	}
	res := make([]scheduler.SCpuNumaPin, 0)
	for nodeCount := 1; nodeCount <= len(h.Nodes); nodeCount *= 2 {
		if ok := h.nodesEnough(nodeCount, vcpuCount, memSizeKB); !ok {
			log.Infof("host %s node count %d not enough", h.HostName, nodeCount)
			continue
		}
		log.Infof("use node count %d", nodeCount)
		var nodeAllocSize = memSizeKB / nodeCount
		if h.NumaEnabled && !ignoreMemSingular {
			// Reject splits whose per-node share (in MB) is not a whole
			// multiple of 1024 — presumably to keep per-node memory
			// 1GiB-aligned for hugepages; TODO confirm.
			if nodeAllocSize/1024%1024 > 0 {
				log.Infof("host %s node alloc size singular %d", h.HostName, nodeAllocSize)
				continue
			}
		}
		var pcpuCount = vcpuCount / nodeCount
		var remPcpuCount = vcpuCount % nodeCount
		for i := 0; i < nodeCount; i++ {
			// Leading nodes absorb the remainder vcpus, one each.
			var npcpuCount = pcpuCount
			if remPcpuCount > 0 {
				npcpuCount += 1
				remPcpuCount -= 1
			}
			cpuNumaPin := scheduler.SCpuNumaPin{
				CpuPin: h.Nodes[i].AllocCpuset(npcpuCount),
				NodeId: h.Nodes[i].NodeId,
			}
			if h.NumaEnabled {
				allocSize := nodeAllocSize / 1024
				cpuNumaPin.MemSizeMB = &allocSize
			}
			res = append(res, cpuNumaPin)
		}
		break
	}
	return res
}
  589. func (h *SHostTopo) AllocCpuNumaNodesWithNodeCount(vcpuCount, memSizeKB, nodeCount int) []scheduler.SCpuNumaPin {
  590. res := make([]scheduler.SCpuNumaPin, 0)
  591. var nodeAllocSize = memSizeKB / nodeCount
  592. var pcpuCount = vcpuCount / nodeCount
  593. var remPcpuCount = vcpuCount % nodeCount
  594. for i := 0; i < nodeCount; i++ {
  595. var npcpuCount = pcpuCount
  596. if remPcpuCount > 0 {
  597. npcpuCount += 1
  598. remPcpuCount -= 1
  599. }
  600. cpuNumaPin := scheduler.SCpuNumaPin{
  601. CpuPin: h.Nodes[i].AllocCpuset(npcpuCount),
  602. NodeId: h.Nodes[i].NodeId,
  603. }
  604. if h.NumaEnabled {
  605. allocSize := nodeAllocSize / 1024
  606. cpuNumaPin.MemSizeMB = &allocSize
  607. }
  608. res = append(res, cpuNumaPin)
  609. }
  610. return res
  611. }
// buildHostTopo constructs desc.HostTopo from the host's reported hardware
// topology. One CPUDie is derived from each L3 cache domain when the node
// reports L3 caches, otherwise from the node's cores; reserved cpus are
// excluded either way. NUMA memory accounting is enabled when per-node
// hugepages are reported, or for container hosts when the
// ContainerNumaAllocate option is set.
func (b *HostBuilder) buildHostTopo(
	desc *HostDesc, reservedCpus *cpuset.CPUSet,
	hugepageSizeKb int, nodeHugepages []hostapi.HostNodeHugepageNr,
	info *hostapi.HostTopology,
) error {
	var numaEnabled = len(nodeHugepages) > 0
	hostTopo := new(SHostTopo)
	hostTopo.Nodes = make([]*NumaNode, len(info.Nodes))
	hasL3Cache := false
	for i := 0; i < len(info.Nodes); i++ {
		nodoMemSizeKB := 0
		if info.Nodes[i].Memory != nil {
			// Node usable memory minus an even share of the host's reserved
			// memory (MemReserved presumably in MB — TODO confirm).
			nodoMemSizeKB = int(info.Nodes[i].Memory.TotalUsableBytes/1024) - (desc.MemReserved * 1024 / len(info.Nodes))
			if desc.HostType == computeapi.HOST_TYPE_CONTAINER && o.Options.ContainerNumaAllocate {
				numaEnabled = true
				log.Infof("host %s ignore singular", desc.Name)
			}
		}
		node := NewNumaNode(info.Nodes[i].ID, info.Nodes[i].Distances, hugepageSizeKb, nodeHugepages, nodoMemSizeKB, desc.MemCmtbound)
		cpuDies := make([]*CPUDie, 0)
		for j := 0; j < len(info.Nodes[i].Caches); j++ {
			// One die per L3 cache domain.
			if info.Nodes[i].Caches[j].Level != 3 {
				continue
			}
			hasL3Cache = true
			cpuDie := new(CPUDie)
			cpuDie.CoreThreadIdMaps = make(map[int]int)
			dieBuilder := cpuset.NewBuilder()
			for k := 0; k < len(info.Nodes[i].Caches[j].LogicalProcessors); k++ {
				if reservedCpus != nil && reservedCpus.Contains(int(info.Nodes[i].Caches[j].LogicalProcessors[k])) {
					continue
				}
				dieBuilder.Add(int(info.Nodes[i].Caches[j].LogicalProcessors[k]))
			}
			cpuDie.LogicalProcessors = dieBuilder.Result()
			cpuDie.initCpuFree(int(desc.CPUCmtbound))
			// Record hyperthread sibling pairs (cores reporting exactly two
			// logical processors) for pair-aware allocation.
			for _, c := range info.Nodes[i].Cores {
				if len(c.LogicalProcessors) != 2 {
					continue
				}
				if cpuDie.LogicalProcessors.Contains(c.LogicalProcessors[0]) {
					cpuDie.CoreThreadIdMaps[c.LogicalProcessors[0]] = c.LogicalProcessors[1]
					cpuDie.CoreThreadIdMaps[c.LogicalProcessors[1]] = c.LogicalProcessors[0]
				}
			}
			node.CpuCount += cpuDie.LogicalProcessors.Size()
			node.LogicalProcessors = node.LogicalProcessors.Union(cpuDie.LogicalProcessors)
			cpuDies = append(cpuDies, cpuDie)
			// TODO: add cpu core builder
		}
		if !hasL3Cache {
			// No L3 info: fall back to one die holding all of the node's
			// non-reserved logical processors.
			// NOTE(review): this fallback die gets no CoreThreadIdMaps and no
			// initCpuFree seeding (CpuFree stays empty) — confirm intended.
			cpuDie := new(CPUDie)
			dieBuilder := cpuset.NewBuilder()
			for j := 0; j < len(info.Nodes[i].Cores); j++ {
				for k := 0; k < len(info.Nodes[i].Cores[j].LogicalProcessors); k++ {
					if reservedCpus != nil && reservedCpus.Contains(info.Nodes[i].Cores[j].LogicalProcessors[k]) {
						continue
					}
					dieBuilder.Add(info.Nodes[i].Cores[j].LogicalProcessors[k])
				}
			}
			cpuDie.LogicalProcessors = dieBuilder.Result()
			node.CpuCount += cpuDie.LogicalProcessors.Size()
			node.LogicalProcessors = node.LogicalProcessors.Union(cpuDie.LogicalProcessors)
			cpuDies = append(cpuDies, cpuDie)
		}
		// Reset for the next node's L3 detection.
		hasL3Cache = false
		node.CpuDies = cpuDies
		hostTopo.Nodes[i] = node
	}
	hostTopo.CPUCmtbound = desc.CPUCmtbound
	hostTopo.NumaEnabled = numaEnabled
	hostTopo.HostName = desc.Name
	desc.HostTopo = hostTopo
	//log.Infof("host topo %s", jsonutils.Marshal(hostTopo))
	sort.Sort(desc.HostTopo)
	desc.EnableCpuNumaAllocate = true
	return nil
}
  691. type ReservedResource struct {
  692. CPUCount int64 `json:"cpu_count"`
  693. MemorySize int64 `json:"memory_size"`
  694. StorageSize int64 `json:"storage_size"`
  695. }
  696. func NewReservedResource(cpu, mem, storage int64) *ReservedResource {
  697. return &ReservedResource{
  698. CPUCount: cpu,
  699. MemorySize: mem,
  700. StorageSize: storage,
  701. }
  702. }
// NewGuestReservedResourceByBuilder computes how much cpu/memory/storage the
// host sets aside for guests using its isolated devices. It returns a zero
// ReservedResource when the host has no isolated devices or reports no
// per-device reservation.
func NewGuestReservedResourceByBuilder(b *HostBuilder, host *computemodels.SHost) (ret *ReservedResource) {
	ret = NewReservedResource(0, 0, 0)
	//isoDevs := b.getUnusedIsolatedDevices(host.ID)
	isoDevs := b.getIsolatedDevices(host.Id)
	if len(isoDevs) == 0 {
		return
	}
	reservedResource := host.GetDevsReservedResource(isoDevs)
	if reservedResource != nil {
		ret.CPUCount = int64(*reservedResource.ReservedCpu)
		ret.MemorySize = int64(*reservedResource.ReservedMemory)
		ret.StorageSize = int64(*reservedResource.ReservedStorage)
	}
	return
}
  718. func NewGuestReservedResourceUsedByBuilder(b *HostBuilder, host *computemodels.SHost, free *ReservedResource) (ret *ReservedResource, err error) {
  719. ret = NewReservedResource(0, 0, 0)
  720. gst := b.getIsolatedDeviceGuests(host.Id)
  721. if len(gst) == 0 {
  722. return
  723. }
  724. var (
  725. cpu int64 = 0
  726. mem int64 = 0
  727. disk int64 = 0
  728. )
  729. guestDiskSize := func(g *computemodels.SGuest, onlyLocal bool) int {
  730. size := 0
  731. disks, _ := g.GetDisks()
  732. for _, disk := range disks {
  733. if !onlyLocal || disk.IsLocal() {
  734. size += disk.DiskSize
  735. }
  736. }
  737. return size
  738. }
  739. for _, g := range gst {
  740. dSize := guestDiskSize(&g, true)
  741. disk += int64(dSize)
  742. if o.Options.IgnoreNonrunningGuests && (g.Status == computeapi.VM_READY) {
  743. continue
  744. }
  745. cpu += int64(g.VcpuCount)
  746. mem += int64(g.VmemSize)
  747. }
  748. usedF := func(used, free int64) int64 {
  749. if used <= free {
  750. return used
  751. }
  752. return free
  753. }
  754. ret.CPUCount = usedF(cpu, free.CPUCount)
  755. ret.MemorySize = usedF(mem, free.MemorySize)
  756. ret.StorageSize = usedF(disk, free.StorageSize)
  757. return
  758. }
  759. func (h *HostDesc) String() string {
  760. s, _ := json.Marshal(h)
  761. return string(s)
  762. }
  763. func (h *HostDesc) Type() int {
  764. // Guest type
  765. return 0
  766. }
  767. func (h *HostDesc) Getter() core.CandidatePropertyGetter {
  768. return newHostGetter(h)
  769. }
  770. func (h *HostDesc) GetGuestCount() int64 {
  771. return h.GuestCount
  772. }
  773. func (h *HostDesc) GetTotalLocalStorageSize(useRsvd bool) int64 {
  774. return h.totalStorageSize(true, useRsvd)
  775. }
  776. func (h *HostDesc) GetFreeLocalStorageSize(useRsvd bool) int64 {
  777. return h.freeStorageSize(true, useRsvd)
  778. }
  779. func (h *HostDesc) totalStorageSize(onlyLocal, useRsvd bool) int64 {
  780. total := int64(0)
  781. for _, storage := range h.Storages {
  782. if !onlyLocal || storage.IsLocal() {
  783. total += int64(storage.GetCapacity())
  784. }
  785. }
  786. if onlyLocal {
  787. return reservedResourceMinusCal(total, h.GuestReservedResource.StorageSize, useRsvd)
  788. }
  789. return total
  790. }
  791. func (h *HostDesc) freeStorageSize(onlyLocal, useRsvd bool) int64 {
  792. total := int64(0)
  793. for _, storage := range h.Storages {
  794. if !onlyLocal || storage.IsLocal() {
  795. total += int64(storage.FreeCapacity)
  796. }
  797. }
  798. total = total + h.GuestReservedResourceUsed.StorageSize - h.GetReservedStorageSize()
  799. sizeSub := h.GuestReservedResource.StorageSize - h.GuestReservedResourceUsed.StorageSize
  800. if sizeSub < 0 {
  801. total += sizeSub
  802. }
  803. if useRsvd {
  804. return reservedResourceAddCal(total, h.GuestReservedStorageSizeFree(), useRsvd)
  805. }
  806. return total
  807. }
  808. func (h *HostDesc) GetFreeStorageSizeOfType(sType string, mediumType string, useRsvd bool, reqMaxSize int64) (int64, int64, error) {
  809. return h.freeStorageSizeOfType(sType, mediumType, useRsvd, reqMaxSize)
  810. }
// freeStorageSizeOfType sums the free capacity of every storage whose
// backend/medium matches, and verifies that at least one matching storage
// can fit a disk of reqMaxSize.
//
// Returns (schedulable free size, actual free size, error); the error is an
// aggregate of per-storage size-check failures when no storage is big enough.
func (h *HostDesc) freeStorageSizeOfType(storageType string, mediumType string, useRsvd bool, reqMaxSize int64) (int64, int64, error) {
	var total int64
	var actualTotal int64
	foundLEReqStore := false
	errs := make([]error, 0)
	for _, storage := range h.Storages {
		if IsStorageBackendMediumMatch(storage, storageType, mediumType) {
			total += int64(storage.FreeCapacity)
			actualTotal += int64(storage.ActualFreeCapacity)
			if err := checkStorageSize(storage, reqMaxSize, useRsvd); err != nil {
				errs = append(errs, err)
			} else {
				// At least one storage can hold the largest requested disk.
				foundLEReqStore = true
			}
		}
	}
	if utils.IsLocalStorage(storageType) {
		// Fold the guest reservation into the local total: add back what the
		// reserved guests actually use, then subtract the full reservation.
		total = total + h.GuestReservedResourceUsed.StorageSize - h.GetReservedStorageSize()
		// sizeSub < 0 means usage overran the reservation; the excess already
		// consumed general capacity, so subtract it.
		sizeSub := h.GuestReservedResource.StorageSize - h.GuestReservedResourceUsed.StorageSize
		if sizeSub < 0 {
			total += sizeSub
		}
	}
	if !foundLEReqStore {
		return 0, 0, errors.NewAggregate(errs)
	}
	if useRsvd {
		return reservedResourceAddCal(total, h.GuestReservedStorageSizeFree(), useRsvd), actualTotal, nil
	}
	// Deduct storage already promised to pending (not yet materialized) disks.
	return total - int64(h.GetPendingUsage().DiskUsage.Get(storageType)), actualTotal, nil
}
  842. func (h *HostDesc) GetFreePort(netId string) int {
  843. freeCnt := h.BaseHostDesc.GetFreePort(netId)
  844. return freeCnt - schedmodels.HostPendingUsageManager.GetNetPendingUsage(netId)
  845. }
  846. func reservedResourceCal(
  847. curRes, rsvdRes int64,
  848. useRsvd, minusRsvd bool,
  849. ) int64 {
  850. actRes := curRes
  851. if useRsvd {
  852. if minusRsvd {
  853. actRes -= rsvdRes
  854. } else {
  855. actRes += rsvdRes
  856. }
  857. }
  858. return actRes
  859. }
  860. func reservedResourceAddCal(curRes, rsvdRes int64, useRsvd bool) int64 {
  861. return reservedResourceCal(curRes, rsvdRes, useRsvd, false)
  862. }
  863. func reservedResourceMinusCal(curRes, rsvdRes int64, useRsvd bool) int64 {
  864. return reservedResourceCal(curRes, rsvdRes, useRsvd, true)
  865. }
  866. func (h *HostDesc) GetTotalMemSize(useRsvd bool) int64 {
  867. return reservedResourceMinusCal(h.TotalMemSize, h.GuestReservedResource.MemorySize, useRsvd)
  868. }
  869. func (h *HostDesc) GetFreeMemSize(useRsvd bool) int64 {
  870. return reservedResourceAddCal(h.FreeMemSize, h.GuestReservedMemSizeFree(), useRsvd) - int64(h.GetPendingUsage().Memory)
  871. }
  872. func (h *HostDesc) GetFreeCpuNuma() scheduler.SortedFreeNumaCpuMam {
  873. if !h.EnableCpuNumaAllocate {
  874. return nil
  875. }
  876. res := make(scheduler.SortedFreeNumaCpuMam, 0)
  877. cpuPin := h.GetPendingUsage().CpuPin
  878. numaPin := h.GetPendingUsage().NumaMemPin
  879. for i := range h.HostTopo.Nodes {
  880. nodeFree := new(scheduler.SFreeNumaCpuMem)
  881. nodeFree.NodeId = h.HostTopo.Nodes[i].NodeId
  882. nodeFree.CpuCount = h.HostTopo.Nodes[i].CpuCount
  883. nodeFree.MemSize = h.HostTopo.Nodes[i].NumaNodeFreeMemSizeKB / 1024
  884. nodeFree.EnableNumaAllocate = h.HostTopo.NumaEnabled
  885. nodeFree.FreeCpuCount = int(float32(h.HostTopo.Nodes[i].CpuCount)*h.CPUCmtbound) - h.HostTopo.Nodes[i].VcpuCount
  886. for cpuId, pending := range cpuPin {
  887. if h.HostTopo.Nodes[i].LogicalProcessors.Contains(cpuId) {
  888. nodeFree.FreeCpuCount -= pending
  889. }
  890. }
  891. if memSize, ok := numaPin[h.HostTopo.Nodes[i].NodeId]; ok {
  892. nodeFree.MemSize -= memSize
  893. }
  894. res = append(res, nodeFree)
  895. }
  896. sort.Sort(res)
  897. return res
  898. }
  899. func (h *HostDesc) GuestReservedMemSizeFree() int64 {
  900. return h.GuestReservedResource.MemorySize - h.GuestReservedResourceUsed.MemorySize
  901. }
  902. func (h *HostDesc) GuestReservedCPUCountFree() int64 {
  903. return h.GuestReservedResource.CPUCount - h.GuestReservedResourceUsed.CPUCount
  904. }
  905. func (h *HostDesc) GuestReservedStorageSizeFree() int64 {
  906. return h.GuestReservedResource.StorageSize - h.GuestReservedResourceUsed.StorageSize
  907. }
  908. func (h *HostDesc) GetReservedMemSize() int64 {
  909. return h.GuestReservedResource.MemorySize + int64(h.MemReserved)
  910. }
  911. func (h *HostDesc) GetReservedCPUCount() int64 {
  912. return h.GuestReservedResource.CPUCount + int64(h.CpuReserved)
  913. }
  914. func (h *HostDesc) GetReservedStorageSize() int64 {
  915. return h.GuestReservedResource.StorageSize
  916. }
  917. func (h *HostDesc) GetTotalCPUCount(useRsvd bool) int64 {
  918. return reservedResourceMinusCal(h.TotalCPUCount, h.GuestReservedResource.CPUCount, useRsvd)
  919. }
  920. func (h *HostDesc) GetFreeCPUCount(useRsvd bool) int64 {
  921. return reservedResourceAddCal(h.FreeCPUCount, h.GuestReservedCPUCountFree(), useRsvd) - int64(h.GetPendingUsage().Cpu)
  922. }
  923. func (h *HostDesc) IndexKey() string {
  924. return h.Id
  925. }
  926. func (h *HostDesc) AllocCpuNumaPin(vcpuCount, memSizeKB int, preferNumaNodes []int) []scheduler.SCpuNumaPin {
  927. if !h.EnableCpuNumaAllocate {
  928. return nil
  929. }
  930. hostTopo := h.HostTopo
  931. pendingUsage := h.GetPendingUsage()
  932. if len(pendingUsage.CpuPin) > 0 || len(pendingUsage.NumaMemPin) > 0 {
  933. hostTopo = HostTopoSubPendingUsage(h.HostTopo, pendingUsage.CpuPin, pendingUsage.NumaMemPin)
  934. }
  935. ignoreMemSingular := h.HostType == computeapi.HOST_TYPE_CONTAINER && o.Options.ContainerNumaAllocate
  936. return hostTopo.AllocCpuNumaNodes(vcpuCount, memSizeKB, ignoreMemSingular, preferNumaNodes)
  937. }
  938. func (h *HostDesc) AllocCpuNumaPinWithNodeCount(vcpuCount, memSizeKB, nodeCount int) []scheduler.SCpuNumaPin {
  939. if !h.EnableCpuNumaAllocate {
  940. return nil
  941. }
  942. hostTopo := h.HostTopo
  943. pendingUsage := h.GetPendingUsage()
  944. if len(pendingUsage.CpuPin) > 0 || len(pendingUsage.NumaMemPin) > 0 {
  945. hostTopo = HostTopoSubPendingUsage(h.HostTopo, pendingUsage.CpuPin, pendingUsage.NumaMemPin)
  946. }
  947. return hostTopo.AllocCpuNumaNodesWithNodeCount(vcpuCount, memSizeKB, nodeCount)
  948. }
// WaitGroupWrapper embeds sync.WaitGroup and adds a helper to run a callback
// in a goroutine tracked by the group.
type WaitGroupWrapper struct {
	gosync.WaitGroup
}
  952. func (w *WaitGroupWrapper) Wrap(cb func()) {
  953. w.Add(1)
  954. go func() {
  955. cb()
  956. w.Done()
  957. }()
  958. }
  959. func waitTimeOut(wg *WaitGroupWrapper, timeout time.Duration) bool {
  960. ch := make(chan struct{})
  961. go func() {
  962. wg.Wait()
  963. close(ch)
  964. }()
  965. select {
  966. case <-ch:
  967. return true
  968. case <-time.After(timeout):
  969. return false
  970. }
  971. }
// HostBuilder assembles HostDesc scheduling candidates from database state
// loaded in bulk by its InitFuncs.
type HostBuilder struct {
	*baseBuilder
	residentTenantDict map[string]map[string]interface{}
	// guests are all guests on candidate hosts; guestDict is keyed by guest
	// id and guestIDs lists the same ids.
	guests    []computemodels.SGuest
	guestDict map[string]interface{}
	guestIDs  []string
	hostStorages []computemodels.SHoststorage
	//hostStoragesDict map[string][]*computemodels.SStorage
	storages              []interface{}
	storageStatesSizeDict map[string]map[string]interface{}
	// hostGuests groups guests by host id; hostBackupGuests by backup host id.
	hostGuests       map[string][]interface{}
	hostBackupGuests map[string][]interface{}
	//groupGuests []interface{}
	//groups []interface{}
	//groupDict map[string]interface{}
	//hostGroupCountDict HostGroupCountDict
	//hostMetadatas []interface{}
	//hostMetadatasDict map[string][]interface{}
	//guestMetadatas []interface{}
	//guestMetadatasDict map[string][]interface{}
	//diskStats []models.StorageCapacity
	// isolatedDevicesDict map[string][]interface{}
	// cpuIOLoads maps host id -> metric name ("cpu_load"/"io_load") -> load
	// value expected in [0, 1].
	cpuIOLoads map[string]map[string]float64
}
  996. func newHostBuilder() *HostBuilder {
  997. builder := new(HostBuilder)
  998. builder.baseBuilder = newBaseBuilder(HostDescBuilder, builder)
  999. return builder
  1000. }
  1001. func (b *HostBuilder) FetchHosts(ids []string) ([]computemodels.SHost, error) {
  1002. hosts := computemodels.HostManager.Query()
  1003. q := hosts.In("id", ids).NotEquals("host_type", computeapi.HOST_TYPE_BAREMETAL)
  1004. hostObjs := make([]computemodels.SHost, 0)
  1005. err := computedb.FetchModelObjects(computemodels.HostManager, q, &hostObjs)
  1006. return hostObjs, err
  1007. }
  1008. func (b *HostBuilder) setGuests(hosts []computemodels.SHost, errMessageChannel chan error) {
  1009. idsQuery := b.AllIDsQuery()
  1010. guests, err := FetchGuestByHostIDsQuery(idsQuery)
  1011. if err != nil {
  1012. errMessageChannel <- err
  1013. return
  1014. }
  1015. guestIDs := make([]string, len(guests))
  1016. func() {
  1017. for i, gst := range guests {
  1018. guestIDs[i] = gst.GetId()
  1019. }
  1020. }()
  1021. hostGuests, err := utils.GroupBy(guests, func(obj interface{}) (string, error) {
  1022. gst, ok := obj.(computemodels.SGuest)
  1023. if !ok {
  1024. return "", utils.ConvertError(obj, "computemodels.SGuest")
  1025. }
  1026. return gst.HostId, nil
  1027. })
  1028. if err != nil {
  1029. errMessageChannel <- err
  1030. return
  1031. }
  1032. hostBackupGuests, err := utils.GroupBy(guests, func(obj interface{}) (string, error) {
  1033. gst, ok := obj.(computemodels.SGuest)
  1034. if !ok {
  1035. return "", utils.ConvertError(obj, "computemodels.SGuest")
  1036. }
  1037. return gst.BackupHostId, nil
  1038. })
  1039. if err != nil {
  1040. errMessageChannel <- err
  1041. return
  1042. }
  1043. guestDict, err := utils.ToDict(guests, func(obj interface{}) (string, error) {
  1044. gst, ok := obj.(computemodels.SGuest)
  1045. if !ok {
  1046. return "", utils.ConvertError(obj, "computemodels.SGuest")
  1047. }
  1048. return gst.GetId(), nil
  1049. })
  1050. if err != nil {
  1051. errMessageChannel <- err
  1052. return
  1053. }
  1054. b.guestIDs = guestIDs
  1055. b.guests = guests
  1056. b.hostGuests = hostGuests
  1057. b.hostBackupGuests = hostBackupGuests
  1058. b.guestDict = guestDict
  1059. return
  1060. }
  1061. //func (b *HostBuilder) setGroupInfo(errMessageChannel chan error) {
  1062. //groupGuests, err := models.FetchByGuestIDs(models.GroupGuests, b.guestIDs)
  1063. //if err != nil {
  1064. //errMessageChannel <- err
  1065. //return
  1066. //}
  1067. //groupIds, err := utils.SelectDistinct(groupGuests, func(obj interface{}) (string, error) {
  1068. //g, ok := obj.(*models.GroupGuest)
  1069. //if !ok {
  1070. //return "", utils.ConvertError(obj, "*models.GroupGuest")
  1071. //}
  1072. //return g.GroupID, nil
  1073. //})
  1074. //if err != nil {
  1075. //errMessageChannel <- err
  1076. //return
  1077. //}
  1078. //groups, err := models.FetchGroupByIDs(groupIds)
  1079. //if err != nil {
  1080. //errMessageChannel <- err
  1081. //return
  1082. //}
  1083. //groupDict, err := utils.ToDict(groups, func(obj interface{}) (string, error) {
  1084. //grp, ok := obj.(*models.Group)
  1085. //if !ok {
  1086. //return "", utils.ConvertError(obj, "*models.Group")
  1087. //}
  1088. //return grp.ID, nil
  1089. //})
  1090. //if err != nil {
  1091. //errMessageChannel <- err
  1092. //return
  1093. //}
  1094. //b.groups = groups
  1095. //b.groupDict = groupDict
  1096. //b.groupGuests = groupGuests
  1097. //hostGroupCountDict, err := b.toHostGroupCountDict(groupGuests)
  1098. //if err != nil {
  1099. //errMessageChannel <- err
  1100. //return
  1101. //}
  1102. //b.hostGroupCountDict = hostGroupCountDict
  1103. //return
  1104. //}
  1105. //type GroupCount struct {
  1106. //ID string `json:"id"` // group id
  1107. //Name string `json:"name"` // group name
  1108. //Count int64 `json:"count"` // guest count
  1109. //}
  1110. //type GroupCounts struct {
  1111. //Data map[string]*GroupCount `json:"data"` // group_id: group_count
  1112. //}
  1113. //func NewGroupCounts() *GroupCounts {
  1114. //return &GroupCounts{
  1115. //Data: make(map[string]*GroupCount),
  1116. //}
  1117. //}
  1118. //type HostGroupCountDict map[string]*GroupCounts
  1119. //func (b *HostBuilder) toHostGroupCountDict(groupGuests []interface{}) (HostGroupCountDict, error) {
  1120. //d := make(map[string]*GroupCounts)
  1121. //for _, groupGuestObj := range groupGuests {
  1122. //groupGuest := groupGuestObj.(*models.GroupGuest)
  1123. //groupObj, grpOK := b.groupDict[groupGuest.GroupID]
  1124. //guestObj, gstOK := b.guestDict[*groupGuest.GuestID]
  1125. //if !grpOK || !gstOK {
  1126. //continue
  1127. //}
  1128. //hostObj, ok := b.hostDict[guestObj.(*models.Guest).HostID]
  1129. //if !ok {
  1130. //continue
  1131. //}
  1132. //host := hostObj.(*models.Host)
  1133. //group := groupObj.(*models.Group)
  1134. //counts, ok := d[host.ID]
  1135. //if !ok {
  1136. //counts = NewGroupCounts()
  1137. //d[host.ID] = counts
  1138. //}
  1139. //count, ok := counts.Data[group.ID]
  1140. //if !ok {
  1141. //count = &GroupCount{ID: group.ID, Name: group.Name, Count: 1}
  1142. //counts.Data[group.ID] = count
  1143. //} else {
  1144. //count.Count++
  1145. //}
  1146. //counts.Data[host.ID] = count
  1147. //}
  1148. //return d, nil
  1149. //}
  1150. //func (b *HostBuilder) setMetadataInfo(hostIDs []string, errMessageChannel chan error) {
  1151. //hostMetadataNames := []string{"dynamic_load_cpu_percent", "dynamic_load_io_util",
  1152. //"enable_sriov", "bridge_driver"}
  1153. //hostMetadataNames = append(hostMetadataNames, models.HostExtraFeature...)
  1154. //hostMetadatas, err := models.FetchMetadatas(models.HostResourceName, hostIDs, hostMetadataNames)
  1155. //if err != nil {
  1156. //errMessageChannel <- err
  1157. //return
  1158. //}
  1159. //guestMetadataNames := []string{"app_tags"}
  1160. //guestMetadatas, err := models.FetchMetadatas(models.GuestResourceName, b.guestIDs, guestMetadataNames)
  1161. //if err != nil {
  1162. //errMessageChannel <- err
  1163. //return
  1164. //}
  1165. //idFunc := func(obj interface{}) (string, error) {
  1166. //metadata, ok := obj.(*models.Metadata)
  1167. //if !ok {
  1168. //return "", utils.ConvertError(obj, "*models.Metadata")
  1169. //}
  1170. //id := strings.Split(metadata.ID, "::")[1]
  1171. //return id, nil
  1172. //}
  1173. //hostMetadatasDict, err := utils.GroupBy(hostMetadatas, idFunc)
  1174. //if err != nil {
  1175. //errMessageChannel <- err
  1176. //return
  1177. //}
  1178. //guestMetadatasDict, err := utils.GroupBy(guestMetadatas, idFunc)
  1179. //if err != nil {
  1180. //errMessageChannel <- err
  1181. //return
  1182. //}
  1183. //b.hostMetadatas = hostMetadatas
  1184. //b.hostMetadatasDict = hostMetadatasDict
  1185. //b.guestMetadatas = guestMetadatas
  1186. //b.guestMetadatasDict = guestMetadatasDict
  1187. //return
  1188. //}
  1189. /*func (b *HostBuilder) setDiskStats(errMessageChannel chan error) {
  1190. storageIDs := make([]string, len(b.storages))
  1191. func() {
  1192. for i, s := range b.storages {
  1193. storageIDs[i] = s.(*models.Storage).ID
  1194. }
  1195. }()
  1196. capacities, err := models.GetStorageCapacities(storageIDs)
  1197. stat3 := make([]utils.StatItem3, len(capacities))
  1198. for i, item := range capacities {
  1199. stat3[i] = item
  1200. }
  1201. if err != nil {
  1202. errMessageChannel <- err
  1203. return
  1204. }
  1205. storageStatesSizeDict, _ := utils.ToStatDict3(stat3)
  1206. b.storageStatesSizeDict = storageStatesSizeDict
  1207. b.diskStats = capacities
  1208. return
  1209. }*/
  1210. func (b *HostBuilder) Clone() BuildActor {
  1211. return newHostBuilder()
  1212. }
  1213. func (b *HostBuilder) AllIDs() ([]string, error) {
  1214. q := computemodels.HostManager.Query("id")
  1215. q = q.Filter(sqlchemy.NotEquals(q.Field("host_type"), computeapi.HOST_TYPE_BAREMETAL))
  1216. return FetchModelIds(q)
  1217. }
  1218. func (b *HostBuilder) AllIDsQuery() sqlchemy.IQuery {
  1219. q := computemodels.HostManager.Query("id")
  1220. q = q.Filter(sqlchemy.NotEquals(q.Field("host_type"), computeapi.HOST_TYPE_BAREMETAL))
  1221. return q
  1222. }
// InitFuncs lists the bulk data-loading steps run before building host descs.
func (b *HostBuilder) InitFuncs() []InitFunc {
	return []InitFunc{
		// b.setSchedtags,
		b.setGuests,
	}
}
// BuildOne assembles the scheduling HostDesc for one host on top of the
// prepared BaseHostDesc: overcommit bounds, the guest reserved resources and
// their current usage, then the metadata/load/numa-pin/guest-resource fill
// steps, in that order.
func (b *HostBuilder) BuildOne(host *computemodels.SHost, getter *networkGetter, baseDesc *BaseHostDesc) (interface{}, error) {
	desc := &HostDesc{
		BaseHostDesc: baseDesc,
	}
	desc.Metadata = make(map[string]string)
	desc.CPUCmtbound = host.GetCPUOvercommitBound()
	desc.MemCmtbound = host.GetMemoryOvercommitBound()
	desc.GuestReservedResource = NewGuestReservedResourceByBuilder(b, host)
	guestRsvdUsed, err := NewGuestReservedResourceUsedByBuilder(b, host, desc.GuestReservedResource)
	if err != nil {
		return nil, err
	}
	desc.GuestReservedResourceUsed = guestRsvdUsed
	// NOTE(review): order appears deliberate — fillGuestsCpuNumaPin builds
	// desc.HostTopo which fillGuestsResourceInfo then updates; confirm before
	// reordering.
	fillFuncs := []func(*HostDesc, *computemodels.SHost) error{
		//b.fillResidentGroups,
		b.fillMetadata,
		b.fillCPUIOLoads,
		b.fillGuestsCpuNumaPin,
		b.fillGuestsResourceInfo,
	}
	for _, f := range fillFuncs {
		err := f(desc, host)
		if err != nil {
			return nil, err
		}
	}
	return desc, nil
}
// fillGuestsCpuNumaPin prepares the host numa topology for cpu/numa-aware
// scheduling: it parses the topology from host sys_info, collects cpus that
// must be excluded (host reserved cpus merged with cpuset-pinned cores),
// reads per-node hugepage info, and hands everything to buildHostTopo.
// No-op when numa allocation is disabled on the host.
func (b *HostBuilder) fillGuestsCpuNumaPin(desc *HostDesc, host *computemodels.SHost) error {
	if !host.EnableNumaAllocate {
		return nil
	}
	topoObj, err := host.SysInfo.Get("topology")
	if err != nil {
		return errors.Wrap(err, "get topology from host sys_info")
	}
	hostTopo := new(hostapi.HostTopology)
	if err := topoObj.Unmarshal(hostTopo); err != nil {
		return errors.Wrap(err, "Unmarshal host topology struct")
	}
	// Reserved cpus come from host metadata as a JSON-encoded
	// HostReserveCpusInput whose Cpus field is a cpuset list string.
	var reservedCpus *cpuset.CPUSet
	reservedCpusStr := host.GetMetadata(context.Background(), computeapi.HOSTMETA_RESERVED_CPUS_INFO, nil)
	if reservedCpusStr != "" {
		reservedCpusJson, err := jsonutils.ParseString(reservedCpusStr)
		if err != nil {
			return errors.Wrap(err, "parse reserved cpus info failed")
		}
		reservedCpusInfo := computeapi.HostReserveCpusInput{}
		err = reservedCpusJson.Unmarshal(&reservedCpusInfo)
		if err != nil {
			return errors.Wrap(err, "unmarshal host reserved cpus info failed")
		}
		reservedCpuset, err := cpuset.Parse(reservedCpusInfo.Cpus)
		if err != nil {
			return errors.Wrap(err, "cpuset parse reserved cpus")
		}
		reservedCpus = &reservedCpuset
	}
	// Cores pinned via cpusets are treated the same as reserved cpus.
	pinnedCpuset, err := host.GetPinnedCpusetCores(context.Background(), nil, nil)
	if err != nil {
		return err
	}
	if pinnedCpuset != nil {
		if reservedCpus == nil {
			reservedCpus = pinnedCpuset
		} else {
			newset := reservedCpus.Union(*pinnedCpuset)
			reservedCpus = &newset
		}
	}
	// Optional per-numa-node hugepage counts from sys_info.
	nodeHugepages := make([]hostapi.HostNodeHugepageNr, 0)
	if host.SysInfo.Contains("node_hugepages") {
		err = host.SysInfo.Unmarshal(&nodeHugepages, "node_hugepages")
		if err != nil {
			return errors.Wrap(err, "unmarshal node hugepages")
		}
	}
	hugepageSizeKb, err := host.SysInfo.Int("hugepage_size_kb")
	if err != nil {
		return errors.Wrap(err, "unmarshal hugepage size kb")
	}
	return b.buildHostTopo(desc, reservedCpus, int(hugepageSizeKb), nodeHugepages, hostTopo)
}
// fillGuestsResourceInfo aggregates the cpu/memory/disk footprint of every
// guest on the host (including guests backed up to it) into the desc's
// counters, loads their numa pins into the host topology, and derives the
// total/free capacity with overcommit and guest reservations applied.
func (b *HostBuilder) fillGuestsResourceInfo(desc *HostDesc, host *computemodels.SHost) error {
	var (
		guestCount          int64
		runningCount        int64
		memSize             int64
		memReqSize          int64
		memFakeDeletedSize  int64
		cpuCount            int64
		cpuReqCount         int64
		cpuBoundCount       int64
		cpuFakeDeletedCount int64
		ioBoundCount        int64
		creatingMemSize     int64
		creatingCPUCount    int64
		creatingGuestCount  int64
		guestsCpuNumaPin    = make([]scheduler.SCpuNumaPin, 0)
	)
	guestsOnHost, ok := b.hostGuests[host.Id]
	if !ok {
		guestsOnHost = []interface{}{}
	}
	// Guests whose backup copy lives on this host also consume resources here.
	backupGuestsOnHost, ok := b.hostBackupGuests[host.Id]
	if ok {
		guestsOnHost = append(guestsOnHost, backupGuestsOnHost...)
	}
	pendingUsage := desc.GetPendingUsage()
	desc.Tenants = make(map[string]int64)
	for _, gst := range guestsOnHost {
		guest := gst.(computemodels.SGuest)
		projectId := guest.ProjectId
		// Per-tenant guest count.
		if count, ok := desc.Tenants[projectId]; ok {
			desc.Tenants[projectId] = count + 1
		} else {
			desc.Tenants[projectId] = 1
		}
		if IsGuestPendingDelete(guest) {
			// Pending-delete ("fake deleted") guests are tracked separately
			// so their resources can optionally be given back.
			memFakeDeletedSize += int64(guest.VmemSize)
			cpuFakeDeletedCount += int64(guest.VcpuCount)
		} else {
			// Guests already accounted in pending usage must not be double
			// counted.
			if _, ok := pendingUsage.PendingGuestIds[guest.Id]; ok {
				log.Infof("fillGuestsResourceInfo guest %s in pending usage", guest.Id)
				continue
			}
			if IsGuestCreating(guest) {
				creatingGuestCount++
				creatingMemSize += int64(guest.VmemSize)
				creatingCPUCount += int64(guest.VcpuCount)
				if guest.IsSchedulerNumaAllocate() {
					cpuNumaPin := make([]scheduler.SCpuNumaPin, 0)
					if err := guest.CpuNumaPin.Unmarshal(&cpuNumaPin); err != nil {
						return errors.Wrap(err, "unmarshal cpu numa pin")
					}
					// Extra cpus pinned beyond VcpuCount count toward usage.
					for i := range cpuNumaPin {
						if cpuNumaPin[i].ExtraCpuCount > 0 {
							creatingCPUCount += int64(cpuNumaPin[i].ExtraCpuCount)
						}
					}
					guestsCpuNumaPin = append(guestsCpuNumaPin, cpuNumaPin...)
				}
			} else if !IsGuestStoppedStatus(guest) {
				// running status
				runningCount++
				memSize += int64(guest.VmemSize)
				cpuCount += int64(guest.VcpuCount)
				if guest.IsSchedulerNumaAllocate() {
					cpuNumaPin := make([]scheduler.SCpuNumaPin, 0)
					if err := guest.CpuNumaPin.Unmarshal(&cpuNumaPin); err != nil {
						return errors.Wrap(err, "unmarshal cpu numa pin")
					}
					for i := range cpuNumaPin {
						if cpuNumaPin[i].ExtraCpuCount > 0 {
							// NOTE(review): this adds to creatingCPUCount even
							// though the guest is running — mirror of the
							// creating branch; confirm it is intentional.
							creatingCPUCount += int64(cpuNumaPin[i].ExtraCpuCount)
						}
					}
					guestsCpuNumaPin = append(guestsCpuNumaPin, cpuNumaPin...)
				}
			}
		}
		//if IsGuestRunning(guest) {
		//	runningCount++
		//	memSize += int64(guest.VmemSize)
		//	cpuCount += int64(guest.VcpuCount)
		//	if host.EnableNumaAllocate && guest.CpuNumaPin != nil {
		//		cpuNumaPin := make([]scheduler.SCpuNumaPin, 0)
		//		if err := guest.CpuNumaPin.Unmarshal(&cpuNumaPin); err != nil {
		//			return errors.Wrap(err, "unmarshal cpu numa pin")
		//		}
		//		guestsCpuNumaPin = append(guestsCpuNumaPin, cpuNumaPin...)
		//	}
		//} else if IsGuestCreating(guest) {
		//	creatingGuestCount++
		//	creatingMemSize += int64(guest.VmemSize)
		//	creatingCPUCount += int64(guest.VcpuCount)
		//	if host.EnableNumaAllocate && guest.CpuNumaPin != nil {
		//		cpuNumaPin := make([]scheduler.SCpuNumaPin, 0)
		//		if err := guest.CpuNumaPin.Unmarshal(&cpuNumaPin); err != nil {
		//			return errors.Wrap(err, "unmarshal cpu numa pin")
		//		}
		//		guestsCpuNumaPin = append(guestsCpuNumaPin, cpuNumaPin...)
		//	}
		//} else if IsGuestPendingDelete(guest) {
		//	memFakeDeletedSize += int64(guest.VmemSize)
		//	cpuFakeDeletedCount += int64(guest.VcpuCount)
		//}
		// Every guest (whatever its status) counts toward the requested totals.
		guestCount++
		cpuReqCount += int64(guest.VcpuCount)
		memReqSize += int64(guest.VmemSize)
		//appTags := b.guestAppTags(guest)
		//for _, tag := range appTags {
		//if tag == "cpu_bound" {
		//cpuBoundCount += int64(guest.VcpuCount)
		//} else if tag == "io_bound" {
		//ioBoundCount++
		//}
		//}
	}
	if host.EnableNumaAllocate && len(guestsCpuNumaPin) > 0 {
		desc.HostTopo.LoadCpuNumaPin(guestsCpuNumaPin)
	}
	//log.Infof("host %s topo %s", desc.Name, jsonutils.Marshal(desc.HostTopo))
	desc.GuestCount = guestCount
	desc.CreatingGuestCount = creatingGuestCount
	desc.RunningGuestCount = runningCount
	desc.RunningMemSize = memSize
	desc.RequiredMemSize = memReqSize
	desc.CreatingMemSize = creatingMemSize
	desc.FakeDeletedMemSize = memFakeDeletedSize
	desc.RunningCPUCount = cpuCount
	desc.RequiredCPUCount = cpuReqCount
	desc.CreatingCPUCount = creatingCPUCount
	desc.FakeDeletedCPUCount = cpuFakeDeletedCount
	// Capacity after applying the overcommit ratios.
	desc.TotalMemSize = int64(float32(desc.MemSize) * desc.MemCmtbound)
	desc.TotalCPUCount = int64(float32(desc.CpuCount) * desc.CPUCmtbound)
	var memFreeSize int64
	var cpuFreeCount int64
	if o.Options.IgnoreNonrunningGuests {
		// Only running+creating guests count against free capacity.
		memFreeSize = desc.TotalMemSize - desc.RunningMemSize - desc.CreatingMemSize
		cpuFreeCount = desc.TotalCPUCount - desc.RunningCPUCount - desc.CreatingCPUCount
	} else {
		memFreeSize = desc.TotalMemSize - desc.RequiredMemSize
		cpuFreeCount = desc.TotalCPUCount - desc.RequiredCPUCount
		if o.Options.IgnoreFakeDeletedGuests {
			memFreeSize += memFakeDeletedSize
			cpuFreeCount += cpuFakeDeletedCount
		}
	}
	// free memory size calculate: fold the guest reservation back in (add
	// actual reserved usage, subtract the whole reservation), and charge any
	// over-consumption of the reservation against the general pool.
	rsvdUseMem := desc.GuestReservedResourceUsed.MemorySize
	memFreeSize = memFreeSize + rsvdUseMem - desc.GetReservedMemSize()
	memSub := desc.GuestReservedResource.MemorySize - desc.GuestReservedResourceUsed.MemorySize
	if memSub < 0 {
		memFreeSize += memSub
	}
	desc.FreeMemSize = memFreeSize
	// free cpu count calculate: same scheme as memory above.
	rsvdUseCPU := desc.GuestReservedResourceUsed.CPUCount
	cpuFreeCount = cpuFreeCount + rsvdUseCPU - desc.GetReservedCPUCount()
	cpuSub := desc.GuestReservedResource.CPUCount - desc.GuestReservedResourceUsed.CPUCount
	if cpuSub < 0 {
		cpuFreeCount += cpuSub
	}
	desc.FreeCPUCount = cpuFreeCount
	// cpuBoundCount/ioBoundCount stay zero while the app-tags logic above is
	// commented out.
	desc.CPUBoundCount = cpuBoundCount
	desc.IOBoundCount = ioBoundCount
	return nil
}
  1479. /*func (b *HostBuilder) guestAppTags(guest computemodels.SGuest) []string {
  1480. metadatas, ok := b.guestMetadatasDict[guest.GetId()]
  1481. if !ok {
  1482. return []string{}
  1483. }
  1484. for _, obj := range metadatas {
  1485. metadata, ok := obj.(*models.Metadata)
  1486. if !ok {
  1487. log.Errorf("%v", utils.ConvertError(obj, "*models.Metadata"))
  1488. return []string{}
  1489. }
  1490. if metadata.Key == "app_tags" {
  1491. tagsStr := metadata.Value
  1492. if len(tagsStr) > 0 {
  1493. return strings.Split(tagsStr, ",")
  1494. }
  1495. }
  1496. }
  1497. return []string{}
  1498. }
  1499. func (b *HostBuilder) storageUsedCapacity(storage *models.Storage, ready bool) int64 {
  1500. d, ok := b.storageStatesSizeDict[storage.ID]
  1501. if !ok {
  1502. return 0
  1503. }
  1504. if ready {
  1505. obj, ok := d[models.DiskReady]
  1506. if !ok {
  1507. return 0
  1508. }
  1509. return obj.(int64)
  1510. }
  1511. var total int64
  1512. for status, sizeObj := range d {
  1513. if (status == models.DiskReady && ready) || (status != models.DiskReady && !ready) {
  1514. total += sizeObj.(int64)
  1515. }
  1516. }
  1517. return total
  1518. }
  1519. func (b *HostBuilder) fillResidentGroups(desc *HostDesc, host *computemodels.SHost) error {
  1520. groups, ok := b.hostGroupCountDict[host.Id]
  1521. if !ok {
  1522. desc.Groups = nil
  1523. return nil
  1524. }
  1525. desc.Groups = groups
  1526. return nil
  1527. }*/
  1528. func (b *HostBuilder) fillMetadata(desc *HostDesc, host *computemodels.SHost) error {
  1529. metadata, err := host.GetAllMetadata(nil, nil)
  1530. if err != nil {
  1531. log.Errorf("Get host %s metadata: %v", desc.GetId(), err)
  1532. return nil
  1533. }
  1534. desc.Metadata = metadata
  1535. return nil
  1536. }
  1537. func (b *HostBuilder) getUsedIsolatedDevices(hostID string) (devs []computemodels.SIsolatedDevice) {
  1538. devs = make([]computemodels.SIsolatedDevice, 0)
  1539. for _, dev := range b.getIsolatedDevices(hostID) {
  1540. if len(dev.GuestId) != 0 {
  1541. devs = append(devs, dev)
  1542. }
  1543. }
  1544. return
  1545. }
  1546. func (b *HostBuilder) getIsolatedDeviceGuests(hostID string) (guests []computemodels.SGuest) {
  1547. guests = make([]computemodels.SGuest, 0)
  1548. usedDevs := b.getUsedIsolatedDevices(hostID)
  1549. if len(usedDevs) == 0 {
  1550. return
  1551. }
  1552. ids := sets.NewString()
  1553. for _, dev := range usedDevs {
  1554. g, ok := b.guestDict[dev.GuestId]
  1555. if !ok {
  1556. continue
  1557. }
  1558. guest := g.(computemodels.SGuest)
  1559. if !ids.Has(guest.Id) {
  1560. ids.Insert(guest.Id)
  1561. guests = append(guests, guest)
  1562. }
  1563. }
  1564. return
  1565. }
  1566. func (b *HostBuilder) getUnusedIsolatedDevices(hostID string) (devs []computemodels.SIsolatedDevice) {
  1567. devs = make([]computemodels.SIsolatedDevice, 0)
  1568. for _, dev := range b.getIsolatedDevices(hostID) {
  1569. if len(dev.GuestId) == 0 {
  1570. devs = append(devs, dev)
  1571. }
  1572. }
  1573. return
  1574. }
  1575. func (b *HostBuilder) fillCPUIOLoads(desc *HostDesc, host *computemodels.SHost) error {
  1576. desc.CPULoad = b.loadByName(host.Id, "cpu_load")
  1577. desc.IOLoad = b.loadByName(host.Id, "io_load")
  1578. return nil
  1579. }
  1580. func (b *HostBuilder) loadByName(hostID, name string) *float64 {
  1581. if b.cpuIOLoads == nil {
  1582. return nil
  1583. }
  1584. loads, ok := b.cpuIOLoads[hostID]
  1585. if !ok {
  1586. return nil
  1587. }
  1588. value := loads[name]
  1589. if value >= 0.0 && value <= 1.0 {
  1590. return &value
  1591. }
  1592. return nil
  1593. }