hpssactl.go 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431
  1. // Copyright 2019 Yunion
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package hpssactl
  15. import (
  16. "fmt"
  17. "regexp"
  18. "strconv"
  19. "strings"
  20. "yunion.io/x/log"
  21. "yunion.io/x/pkg/errors"
  22. "yunion.io/x/pkg/tristate"
  23. "yunion.io/x/pkg/util/stringutils"
  24. "yunion.io/x/pkg/utils"
  25. api "yunion.io/x/onecloud/pkg/apis/compute"
  26. "yunion.io/x/onecloud/pkg/baremetal/utils/raid"
  27. "yunion.io/x/onecloud/pkg/compute/baremetal"
  28. "yunion.io/x/onecloud/pkg/util/regutils2"
  29. )
// HPSARaidPhyDev represents a single physical drive attached to an HP
// Smart Array controller, extending the generic raid.RaidBasePhyDev
// with the hpssacli-specific drive address.
type HPSARaidPhyDev struct {
	*raid.RaidBasePhyDev
	// addr is the physical drive address as reported by hpssacli
	// (the "physicaldrive <addr>" header, e.g. "1I:1:1").
	addr string
}
  34. func newHPSARaidPhyDev(addr string, adapter int, rotate bool) *HPSARaidPhyDev {
  35. b := raid.NewRaidBasePhyDev(baremetal.DISK_DRIVER_HPSARAID)
  36. b.Adapter = adapter
  37. if rotate {
  38. b.Rotate = tristate.True
  39. } else {
  40. b.Rotate = tristate.False
  41. }
  42. return &HPSARaidPhyDev{
  43. RaidBasePhyDev: b,
  44. addr: addr,
  45. }
  46. }
  47. func (dev *HPSARaidPhyDev) ToBaremetalStorage(index int) *baremetal.BaremetalStorage {
  48. s := dev.RaidBasePhyDev.ToBaremetalStorage(index)
  49. s.Addr = dev.addr
  50. return s
  51. }
  52. func (dev *HPSARaidPhyDev) parseLine(line string) bool {
  53. key, val := stringutils.SplitKeyValue(line)
  54. if key == "" {
  55. return false
  56. }
  57. switch key {
  58. case "Size":
  59. dat := strings.Split(val, " ")
  60. szStr, unitStr := dat[0], dat[1]
  61. var sz int64
  62. szF, err := strconv.ParseFloat(szStr, 64)
  63. if err != nil {
  64. log.Errorf("Parse size string %s: %v", szStr, err)
  65. return false
  66. }
  67. switch unitStr {
  68. case "GB":
  69. sz = int64(szF * 1000 * 1000 * 1000)
  70. case "TB":
  71. sz = int64(szF * 1000 * 1000 * 1000 * 1000)
  72. case "MB":
  73. sz = int64(szF * 1000 * 1000)
  74. default:
  75. log.Errorf("Unsupported unit: %s", unitStr)
  76. return false
  77. }
  78. dev.Size = sz / 1024 / 1024
  79. case "Model":
  80. dev.Model = strings.Join(regexp.MustCompile(`\s+`).Split(val, -1), " ")
  81. case "Status":
  82. dev.Status = val
  83. default:
  84. return false
  85. }
  86. return true
  87. }
  88. func (dev *HPSARaidPhyDev) isComplete() bool {
  89. if !dev.RaidBasePhyDev.IsComplete() {
  90. return false
  91. }
  92. if dev.Size < 0 {
  93. return false
  94. }
  95. return true
  96. }
// GetSpecString returns the hpssacli drive address used to identify a
// storage device on the command line.
func GetSpecString(dev *baremetal.BaremetalStorage) string {
	return dev.Addr
}
// HPSARaidAdaptor represents one HP Smart Array controller (identified
// by its slot index) together with the physical drives discovered on it.
type HPSARaidAdaptor struct {
	// index is the controller slot number as reported by hpssacli.
	index int
	// raid points back to the owning driver, which provides the
	// execution terminal.
	raid *HPSARaid
	// devs holds the physical drives parsed from hpssacli output.
	devs []*HPSARaidPhyDev
}
  105. func newHPSARaidAdaptor(index int, raid *HPSARaid) *HPSARaidAdaptor {
  106. return &HPSARaidAdaptor{
  107. index: index,
  108. raid: raid,
  109. }
  110. }
// GetIndex returns the controller slot number of this adaptor.
func (adapter *HPSARaidAdaptor) GetIndex() int {
	return adapter.index
}
// PreBuildRaid is a no-op for HP Smart Array: no preparation is needed
// before building logical drives.
func (adapter *HPSARaidAdaptor) PreBuildRaid(confs []*api.BaremetalDiskConfig) error {
	return nil
}
// PostBuildRaid is a no-op for HP Smart Array: no cleanup is needed
// after building logical drives.
func (adapter *HPSARaidAdaptor) PostBuildRaid() error {
	return nil
}
  120. func (adapter *HPSARaidAdaptor) ParsePhyDevs() error {
  121. parseByCmd := func(cmd string, isRotate bool) error {
  122. ret, err := adapter.raid.term.Run(cmd)
  123. if err != nil {
  124. return err
  125. }
  126. adapter.parsePhyDevs(ret, isRotate)
  127. return nil
  128. }
  129. cmd1 := GetCommand("controller", fmt.Sprintf("slot=%d", adapter.index), "ssdphysicaldrive", "all", "show", "detail")
  130. cmd2 := GetCommand("controller", fmt.Sprintf("slot=%d", adapter.index), "physicaldrive", "all", "show", "detail")
  131. var err1 error
  132. var err2 error
  133. if err1 = parseByCmd(cmd1, false); err1 != nil {
  134. err1 = errors.Errorf("parsePhyDevs by cmd %q: %v", cmd1, err1)
  135. }
  136. if err2 = parseByCmd(cmd2, true); err2 != nil {
  137. err2 = errors.Errorf("parsePhyDevs by cmd %q: %v", cmd1, err2)
  138. }
  139. if err1 != nil && err2 != nil {
  140. return errors.Errorf("ssd: %v, hdd: %v", err1, err2)
  141. }
  142. return nil
  143. }
  144. func (adapter *HPSARaidAdaptor) parsePhyDevs(lines []string, isRotate bool) {
  145. var phydev *HPSARaidPhyDev
  146. for _, line := range lines {
  147. m := regutils2.SubGroupMatch(`physicaldrive\s+(?P<addr>\w+:\w+:\w)`, line)
  148. if len(m) != 0 {
  149. phydev = newHPSARaidPhyDev(m["addr"], adapter.index, isRotate)
  150. } else if phydev != nil && phydev.parseLine(line) && phydev.isComplete() {
  151. oldDev := adapter.getPhyDevByAddr(phydev.addr)
  152. if oldDev == nil {
  153. adapter.devs = append(adapter.devs, phydev)
  154. }
  155. phydev = nil
  156. }
  157. }
  158. }
  159. func (adapter *HPSARaidAdaptor) getPhyDevByAddr(addr string) *HPSARaidPhyDev {
  160. for _, dev := range adapter.devs {
  161. if addr == dev.addr {
  162. return dev
  163. }
  164. }
  165. return nil
  166. }
  167. func (adapter *HPSARaidAdaptor) GetDevices() []*baremetal.BaremetalStorage {
  168. ret := []*baremetal.BaremetalStorage{}
  169. for idx, dev := range adapter.devs {
  170. ret = append(ret, dev.ToBaremetalStorage(idx))
  171. }
  172. return ret
  173. }
  174. func (adapter *HPSARaidAdaptor) conf2Params(conf *api.BaremetalDiskConfig) []string {
  175. params := []string{}
  176. if conf.Direct != nil {
  177. if *(conf.Direct) {
  178. params = append(params, "caching=disable")
  179. } else {
  180. params = append(params, "caching=enable")
  181. }
  182. }
  183. if conf.Strip != nil {
  184. params = append(params, fmt.Sprintf("stripsize=%d", *(conf.Strip)))
  185. }
  186. return params
  187. }
// getLastArray scans "logicaldrive all show" output for an
// "array <idx>" header and returns the matched index.
//
// NOTE(review): despite the name, this returns on the FIRST matched
// array line, not the last one — confirm whether hpssacli lists the
// newest array first, or whether the loop was meant to keep scanning.
func (adapter *HPSARaidAdaptor) getLastArray() (string, error) {
	cmd := GetCommand("controller", fmt.Sprintf("slot=%d", adapter.index), "logicaldrive", "all", "show")
	// Command errors are deliberately ignored (see commented-out check
	// below): a missing logical drive listing is treated as "no array".
	ret, _ := adapter.raid.term.Run(cmd)
	// ignore errors
	// if err != nil {
	// return "", err
	// }
	var lastArray string
	for _, line := range ret {
		m := regutils2.SubGroupMatch(`array\s+(?P<idx>\w+)`, line)
		if len(m) > 0 {
			lastArray = m["idx"]
			return lastArray, nil
		}
	}
	// No array found: return an empty index without error.
	return "", nil
}
// buildRaid creates one or more logical drives at the given RAID level
// from the listed physical drives.
//
// The first logical drive is created with "create type=ld" (sized by
// conf.Size[0] when sizes are given). When conf.Size lists multiple
// sizes, the remaining logical drives are carved out of the array found
// by getLastArray, and the final one uses "size=max" to consume the
// remaining capacity.
//
// NOTE(review): when len(conf.Size) == 1, restSize is empty and
// RunWithInput is invoked with zero follow-up commands — confirm the
// terminal treats that as a no-op.
// NOTE(review): the last requested size in conf.Size is overridden by
// "size=max" — presumably intentional to avoid slack space; verify.
func (adapter *HPSARaidAdaptor) buildRaid(level string, devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error {
	labels := []string{}
	for _, dev := range devs {
		labels = append(labels, fmt.Sprintf("%s", GetSpecString(dev)))
	}
	args := []string{
		"controller", fmt.Sprintf("slot=%d", adapter.GetIndex()),
		"create", "type=ld", fmt.Sprintf("drives=%s", strings.Join(labels, ",")),
		fmt.Sprintf("raid=%s", level),
	}
	if len(conf.Size) > 0 {
		args = append(args, fmt.Sprintf("size=%d", conf.Size[0]))
	}
	params := adapter.conf2Params(conf)
	args = append(args, params...)
	cmd := GetCommand(args...)
	// hpssacli prompts for confirmation; feed it a "y".
	_, err := adapter.raid.term.RunWithInput(strings.NewReader("y\n"), cmd)
	if err != nil {
		return err
	}
	if len(conf.Size) > 0 {
		array, err := adapter.getLastArray()
		if err != nil {
			return fmt.Errorf("getLastArray: %v", err)
		}
		cmds := []string{}
		restSize := conf.Size[1:]
		for idx, sz := range restSize {
			isLast := idx == len(restSize)-1
			sizeStr := fmt.Sprintf("size=%d", sz)
			if isLast {
				// Last drive takes all remaining space.
				sizeStr = "size=max"
			}
			args = []string{"controller", fmt.Sprintf("slot=%d", adapter.index),
				"array", array, "create", "type=ld",
				fmt.Sprintf("raid=%s", level),
				sizeStr,
			}
			args = append(args, params...)
			cmds = append(cmds, GetCommand(args...))
		}
		_, err = adapter.raid.term.RunWithInput(strings.NewReader("y\n"), cmds...)
	}
	return err
}
// BuildRaid0 creates a RAID 0 logical drive over the given devices.
func (adapter *HPSARaidAdaptor) BuildRaid0(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error {
	return adapter.buildRaid("0", devs, conf)
}
// BuildRaid1 creates a RAID 1 logical drive over the given devices.
func (adapter *HPSARaidAdaptor) BuildRaid1(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error {
	return adapter.buildRaid("1", devs, conf)
}
// BuildRaid5 creates a RAID 5 logical drive over the given devices.
func (adapter *HPSARaidAdaptor) BuildRaid5(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error {
	return adapter.buildRaid("5", devs, conf)
}
// BuildRaid10 creates a RAID 1+0 logical drive over the given devices.
func (adapter *HPSARaidAdaptor) BuildRaid10(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error {
	return adapter.buildRaid("1+0", devs, conf)
}
  262. func (adapter *HPSARaidAdaptor) BuildNoneRaid(devs []*baremetal.BaremetalStorage) error {
  263. for _, d := range devs {
  264. // WT|WB] [NORA|RA] [Direct|Cached] [CachedBadBBU|NoCachedBadBBU]
  265. useWT := true
  266. useDirect := true
  267. if err := adapter.buildRaid("0", []*baremetal.BaremetalStorage{d}, &api.BaremetalDiskConfig{WT: &useWT, Direct: &useDirect}); err != nil {
  268. return err
  269. }
  270. }
  271. return nil
  272. }
  273. func (adapter *HPSARaidAdaptor) removeLogicVolume(idx int) error {
  274. cmd := GetCommand("controller", fmt.Sprintf("slot=%d", adapter.index), "logicaldrive",
  275. fmt.Sprintf("%d", idx), "delete", "forced")
  276. _, err := adapter.raid.term.Run(cmd)
  277. return err
  278. }
  279. func (adapter *HPSARaidAdaptor) GetLogicVolumes() ([]*raid.RaidLogicalVolume, error) {
  280. cmd := GetCommand("controller", fmt.Sprintf("slot=%d", adapter.index), "logicaldrive", "all", "show")
  281. ret, _ := adapter.raid.term.Run(cmd)
  282. // ignore error
  283. // if err != nil {
  284. // return nil, err
  285. // }
  286. return adapter.parseLogicalVolumes(ret)
  287. }
  288. func (adapter *HPSARaidAdaptor) parseLogicalVolumes(lines []string) ([]*raid.RaidLogicalVolume, error) {
  289. lvs := []*raid.RaidLogicalVolume{}
  290. for _, line := range lines {
  291. m := regutils2.SubGroupMatch(`logicaldrive\s+(?P<addr>\w+)`, line)
  292. if len(m) > 0 {
  293. idxStr := m["addr"]
  294. idx, err := strconv.Atoi(idxStr)
  295. if err != nil {
  296. return nil, fmt.Errorf("%s not int: %v", idxStr, err)
  297. }
  298. lvs = append(lvs, &raid.RaidLogicalVolume{
  299. Index: idx,
  300. Adapter: adapter.index,
  301. })
  302. }
  303. }
  304. return lvs, nil
  305. }
  306. func (adapter *HPSARaidAdaptor) RemoveLogicVolumes() error {
  307. lvs, err := adapter.GetLogicVolumes()
  308. if err != nil {
  309. return fmt.Errorf("Failed to get logic volumes: %v", err)
  310. }
  311. for _, i := range raid.ReverseLogicalArray(lvs) {
  312. if err := adapter.removeLogicVolume(i.Index); err != nil {
  313. return fmt.Errorf("Remove %#v logical volume: %v", i, err)
  314. }
  315. }
  316. return nil
  317. }
// HPSARaid is the raid.IRaidDriver implementation for HP Smart Array
// controllers managed through the hpssacli tool.
type HPSARaid struct {
	// term executes hpssacli commands on the target host.
	term raid.IExecTerm
	// adapters are the controllers discovered by ParsePhyDevs.
	adapters []*HPSARaidAdaptor
}
  322. func NewHPSARaid(term raid.IExecTerm) raid.IRaidDriver {
  323. return &HPSARaid{
  324. term: term,
  325. adapters: make([]*HPSARaidAdaptor, 0),
  326. }
  327. }
  328. func (r *HPSARaid) ParsePhyDevs() error {
  329. if !utils.IsInStringArray(raid.MODULE_HPSA, raid.GetModules(r.term)) {
  330. return fmt.Errorf("Not found hpsa module")
  331. }
  332. cmd := GetCommand("controller", "all", "show")
  333. ret, err := r.term.Run(cmd)
  334. if err != nil {
  335. return err
  336. }
  337. return r.parsePhyDevs(ret)
  338. }
  339. func (r *HPSARaid) parsePhyDevs(lines []string) error {
  340. for _, line := range lines {
  341. m := regutils2.SubGroupMatch(`\s+Slot\s+(?P<idx>[0-9]+)\s+`, line)
  342. if len(m) > 0 {
  343. idxStr := m["idx"]
  344. idx, err := strconv.Atoi(idxStr)
  345. if err != nil {
  346. return err
  347. }
  348. adapter := newHPSARaidAdaptor(idx, r)
  349. r.adapters = append(r.adapters, adapter)
  350. }
  351. }
  352. var errs []error
  353. for _, a := range r.adapters {
  354. err := a.ParsePhyDevs()
  355. if err != nil {
  356. log.Errorf("parse adapter %d fail: %s", a.GetIndex(), err)
  357. errs = append(errs, err)
  358. }
  359. }
  360. if len(errs) == len(r.adapters) {
  361. // all failed
  362. return errors.NewAggregate(errs)
  363. }
  364. return nil
  365. }
// PreBuildRaid is a no-op for HP Smart Array: no driver-level
// preparation is required before building RAID configurations.
func (r *HPSARaid) PreBuildRaid(_ []*api.BaremetalDiskConfig, _ int) error {
	return nil
}
  369. func (r *HPSARaid) GetAdapters() []raid.IRaidAdapter {
  370. ret := make([]raid.IRaidAdapter, 0)
  371. for _, a := range r.adapters {
  372. ret = append(ret, a)
  373. }
  374. return ret
  375. }
// GetName returns the driver identifier used for registration.
func (r *HPSARaid) GetName() string {
	return baremetal.DISK_DRIVER_HPSARAID
}
// CleanRaid is a no-op for HP Smart Array: no driver-wide cleanup is
// performed.
func (r *HPSARaid) CleanRaid() error {
	// pass
	return nil
}
  383. func GetCommand(args ...string) string {
  384. bin := "/opt/hp/hpssacli/bld/hpssacli"
  385. return raid.GetCommand(bin, args...)
  386. }
// init registers this driver under the HP Smart Array driver name so
// the baremetal agent can select it at runtime.
func init() {
	raid.RegisterDriver(baremetal.DISK_DRIVER_HPSARAID, NewHPSARaid)
}