// cpu_darwin_cgo.go
  1. // +build darwin
  2. // +build cgo
  3. package cpu
  4. /*
  5. #include <stdlib.h>
  6. #include <sys/sysctl.h>
  7. #include <sys/mount.h>
  8. #include <mach/mach_init.h>
  9. #include <mach/mach_host.h>
  10. #include <mach/host_info.h>
  11. #include <TargetConditionals.h>
  12. #if TARGET_OS_MAC
  13. #include <libproc.h>
  14. #endif
  15. #include <mach/processor_info.h>
  16. #include <mach/vm_map.h>
  17. */
  18. import "C"
  19. import (
  20. "bytes"
  21. "encoding/binary"
  22. "fmt"
  23. "unsafe"
  24. )
  25. // these CPU times for darwin is borrowed from influxdb/telegraf.
  26. func perCPUTimes() ([]TimesStat, error) {
  27. var (
  28. count C.mach_msg_type_number_t
  29. cpuload *C.processor_cpu_load_info_data_t
  30. ncpu C.natural_t
  31. )
  32. status := C.host_processor_info(C.host_t(C.mach_host_self()),
  33. C.PROCESSOR_CPU_LOAD_INFO,
  34. &ncpu,
  35. (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
  36. &count)
  37. if status != C.KERN_SUCCESS {
  38. return nil, fmt.Errorf("host_processor_info error=%d", status)
  39. }
  40. // jump through some cgo casting hoops and ensure we properly free
  41. // the memory that cpuload points to
  42. target := C.vm_map_t(C.mach_task_self_)
  43. address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
  44. defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))
  45. // the body of struct processor_cpu_load_info
  46. // aka processor_cpu_load_info_data_t
  47. var cpu_ticks [C.CPU_STATE_MAX]uint32
  48. // copy the cpuload array to a []byte buffer
  49. // where we can binary.Read the data
  50. size := int(ncpu) * binary.Size(cpu_ticks)
  51. buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size]
  52. bbuf := bytes.NewBuffer(buf)
  53. var ret []TimesStat
  54. for i := 0; i < int(ncpu); i++ {
  55. err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
  56. if err != nil {
  57. return nil, err
  58. }
  59. c := TimesStat{
  60. CPU: fmt.Sprintf("cpu%d", i),
  61. User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
  62. System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
  63. Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
  64. Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
  65. }
  66. ret = append(ret, c)
  67. }
  68. return ret, nil
  69. }
  70. func allCPUTimes() ([]TimesStat, error) {
  71. var count C.mach_msg_type_number_t
  72. var cpuload C.host_cpu_load_info_data_t
  73. count = C.HOST_CPU_LOAD_INFO_COUNT
  74. status := C.host_statistics(C.host_t(C.mach_host_self()),
  75. C.HOST_CPU_LOAD_INFO,
  76. C.host_info_t(unsafe.Pointer(&cpuload)),
  77. &count)
  78. if status != C.KERN_SUCCESS {
  79. return nil, fmt.Errorf("host_statistics error=%d", status)
  80. }
  81. c := TimesStat{
  82. CPU: "cpu-total",
  83. User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
  84. System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
  85. Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
  86. Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
  87. }
  88. return []TimesStat{c}, nil
  89. }